2024-11-10 12:15:49,773 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7bd7d6d6
2024-11-10 12:15:49,792 main DEBUG Took 0.011609 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-10 12:15:49,792 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-10 12:15:49,792 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-10 12:15:49,793 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-10 12:15:49,795 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 12:15:49,805 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-10 12:15:49,820 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 12:15:49,822 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 12:15:49,823 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 12:15:49,824 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 12:15:49,825 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 12:15:49,826 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 12:15:49,827 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 12:15:49,828 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 12:15:49,829 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 12:15:49,830 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 12:15:49,831 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 12:15:49,831 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 12:15:49,832 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 12:15:49,832 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 12:15:49,833 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 12:15:49,833 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 12:15:49,834 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 12:15:49,835 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 12:15:49,835 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 12:15:49,836 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 12:15:49,836 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 12:15:49,837 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 12:15:49,837 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 12:15:49,838 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 12:15:49,838 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 12:15:49,839 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-10 12:15:49,841 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 12:15:49,842 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-10 12:15:49,844 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-10 12:15:49,845 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
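[Editor's note] The LoggerConfig builders above are simply materializing the per-package log levels declared in the test's log4j2.properties. A minimal, hypothetical Java sketch of the same levels applied programmatically through Log4j2's Configurator API (the class name BalancerTestLogLevels is invented for illustration; the test itself takes these from the properties file, not from code):

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    // Illustration only: mirrors the levels shown in the LoggerConfig builder dumps above.
    public final class BalancerTestLogLevels {
        public static void main(String[] args) {
            Configurator.setRootLevel(Level.INFO);                                   // rootLogger: INFO,Console
            Configurator.setLevel("org.apache.hadoop", Level.WARN);
            Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
            Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
            Configurator.setLevel("org.apache.hadoop.metrics2.util.MBeans", Level.ERROR);
            Configurator.setLevel("org.apache.hbase.thirdparty.io.netty.channel", Level.DEBUG);
        }
    }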
2024-11-10 12:15:49,847 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-10 12:15:49,847 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-10 12:15:49,857 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-10 12:15:49,860 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-10 12:15:49,862 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-10 12:15:49,863 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-10 12:15:49,863 main DEBUG createAppenders(={Console})
2024-11-10 12:15:49,864 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7bd7d6d6 initialized
2024-11-10 12:15:49,864 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7bd7d6d6
2024-11-10 12:15:49,865 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7bd7d6d6 OK.
2024-11-10 12:15:49,866 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-10 12:15:49,866 main DEBUG OutputStream closed
2024-11-10 12:15:49,866 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-10 12:15:49,867 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-10 12:15:49,867 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@25fb8912 OK
2024-11-10 12:15:49,938 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-10 12:15:49,940 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-10 12:15:49,941 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-10 12:15:49,941 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-10 12:15:49,942 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-10 12:15:49,942 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-10 12:15:49,942 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-10 12:15:49,943 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-10 12:15:49,943 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-10 12:15:49,943 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-10 12:15:49,944 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-10 12:15:49,944 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-10 12:15:49,944 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-10 12:15:49,944 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-10 12:15:49,945 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-10 12:15:49,945 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-10 12:15:49,945 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-10 12:15:49,946 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-10 12:15:49,948 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-10 12:15:49,948 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@64a40280) with optional ClassLoader: null
2024-11-10 12:15:49,948 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-10 12:15:49,949 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@64a40280] started OK.
2024-11-10T12:15:49,962 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.master.balancer.TestBalancerDecision timeout: 13 mins
2024-11-10 12:15:49,964 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-10 12:15:49,964 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-10T12:15:50,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-11-10T12:15:50,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
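[Editor's note] The "Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, ..." line above reports the StochasticLoadBalancer tuning knobs, and the "skipping load balancing" messages that follow point at hbase.master.balancer.stochastic.minCostNeedBalance. A minimal, illustrative Java sketch of how those values map to configuration keys (key names follow the usual hbase.master.balancer.stochastic.* convention but should be verified against the HBase release in use; the class name is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustration only: the knobs reported in the "Loaded config" log line above.
    public final class StochasticBalancerTuning {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
            conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
            conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
            // Lowering this threshold (1.0 in this test run) makes the balancer produce
            // plans at smaller imbalances, per the advice in the log messages below.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        }
    }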
2024-11-10T12:15:50,642 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: master.balancer.TestBalancerDecision#testBalancerDecisions Thread=12, OpenFileDescriptor=286, MaxFileDescriptor=1048576, SystemLoadAverage=196, ProcessCount=11, AvailableMemoryMB=6266
2024-11-10T12:15:50,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-11-10T12:15:50,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-11-10T12:15:50,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=-1.0
2024-11-10T12:15:50,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=true, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-11-10T12:15:50,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0
2024-11-10T12:15:50,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5
2024-11-10T12:15:50,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4
2024-11-10T12:15:50,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3
2024-11-10T12:15:50,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2
2024-11-10T12:15:50,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1
2024-11-10T12:15:50,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0
2024-11-10T12:15:50,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9
2024-11-10T12:15:50,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8
2024-11-10T12:15:50,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7
2024-11-10T12:15:50,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6
2024-11-10T12:15:50,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1
2024-11-10T12:15:50,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv331544154=1, srv218764907=0} racks are {rack=0}
2024-11-10T12:15:50,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-11-10T12:15:50,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1
2024-11-10T12:15:50,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-11-10T12:15:50,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-11-10T12:15:50,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1
2024-11-10T12:15:50,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:50,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0);
2024-11-10T12:15:50,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0
2024-11-10T12:15:50,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv331544154=1, srv218764907=0} racks are {rack=0}
2024-11-10T12:15:50,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0
2024-11-10T12:15:50,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1
2024-11-10T12:15:50,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-11-10T12:15:50,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-11-10T12:15:50,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1
2024-11-10T12:15:50,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:50,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:50,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2137710683=0, srv617136889=1} racks are {rack=0} 2024-11-10T12:15:50,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:50,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2137710683=0, srv617136889=1} racks are {rack=0} 2024-11-10T12:15:50,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:50,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv429423675=0, srv888505069=1} racks are {rack=0} 2024-11-10T12:15:50,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:50,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv429423675=0, srv888505069=1} racks are {rack=0} 2024-11-10T12:15:50,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:50,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv787189723=1, srv117551776=0} racks are {rack=0} 2024-11-10T12:15:50,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:50,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv787189723=1, srv117551776=0} racks are {rack=0} 2024-11-10T12:15:50,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:50,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv787189723=1, srv117551776=0} racks are {rack=0} 2024-11-10T12:15:50,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:50,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1366771256=0, srv1818902979=1} racks are {rack=0} 2024-11-10T12:15:50,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:50,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1366771256=0, srv1818902979=1} racks are {rack=0} 2024-11-10T12:15:50,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:50,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1366771256=0, srv1818902979=1} racks are {rack=0} 2024-11-10T12:15:50,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:50,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1366771256=0, srv1818902979=1} racks are {rack=0} 2024-11-10T12:15:50,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:50,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1638640491=0, srv2013961245=1} racks are {rack=0} 2024-11-10T12:15:50,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:50,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1311503204=0, srv1956851766=1} racks are {rack=0} 2024-11-10T12:15:50,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:50,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1027899527=0, srv1223754008=1} racks are {rack=0} 2024-11-10T12:15:50,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:50,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1027899527=0, srv1223754008=1} racks are {rack=0} 2024-11-10T12:15:50,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:50,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1027899527=0, srv1223754008=1} racks are {rack=0} 2024-11-10T12:15:50,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:50,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1027899527=0, srv1223754008=1} racks are {rack=0} 2024-11-10T12:15:50,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:50,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1027899527=0, srv1223754008=1} racks are {rack=0} 2024-11-10T12:15:50,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:50,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1027899527=0, srv1223754008=1} racks are {rack=0} 2024-11-10T12:15:50,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-10T12:15:50,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1027899527=0, srv1223754008=1} racks are {rack=0} 2024-11-10T12:15:50,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:50,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1027899527=0, srv1223754008=1} racks are {rack=0} 2024-11-10T12:15:50,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:50,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1027899527=0, srv1223754008=1} racks are {rack=0} 2024-11-10T12:15:50,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:50,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1027899527=0, srv1223754008=1} racks are {rack=0} 2024-11-10T12:15:50,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1380 2024-11-10T12:15:50,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1381 2024-11-10T12:15:50,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table880 2024-11-10T12:15:50,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table880) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1140 2024-11-10T12:15:50,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1382 2024-11-10T12:15:50,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table881 2024-11-10T12:15:50,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table881) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1141 2024-11-10T12:15:50,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1383 2024-11-10T12:15:50,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table640 2024-11-10T12:15:50,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table640) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table882 2024-11-10T12:15:50,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table882) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1142 2024-11-10T12:15:50,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1384 2024-11-10T12:15:50,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table641 2024-11-10T12:15:50,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table641) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table883 2024-11-10T12:15:50,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table883) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1143 2024-11-10T12:15:50,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1385 2024-11-10T12:15:50,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table400 2024-11-10T12:15:50,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table642 2024-11-10T12:15:50,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table642) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table884 2024-11-10T12:15:50,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table884) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1144 2024-11-10T12:15:50,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1386 2024-11-10T12:15:50,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table401 2024-11-10T12:15:50,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table643 2024-11-10T12:15:50,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table643) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table885 2024-11-10T12:15:50,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table885) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1145 2024-11-10T12:15:50,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1387 2024-11-10T12:15:50,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table402 2024-11-10T12:15:50,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table644 2024-11-10T12:15:50,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table644) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table886 2024-11-10T12:15:50,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table886) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table403 2024-11-10T12:15:50,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table645 2024-11-10T12:15:50,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table645) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table887 2024-11-10T12:15:50,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table887) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table404 2024-11-10T12:15:50,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table646 2024-11-10T12:15:50,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table646) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table888 2024-11-10T12:15:50,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table888) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table405 2024-11-10T12:15:50,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table647 2024-11-10T12:15:50,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table647) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table889 2024-11-10T12:15:50,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table889) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table406 2024-11-10T12:15:50,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table648 2024-11-10T12:15:50,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table648) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table407 2024-11-10T12:15:50,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table649 2024-11-10T12:15:50,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table649) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table408 2024-11-10T12:15:50,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table409 2024-11-10T12:15:50,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1146 2024-11-10T12:15:50,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1388 2024-11-10T12:15:50,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1147 2024-11-10T12:15:50,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1389 2024-11-10T12:15:50,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1148 2024-11-10T12:15:50,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1149 2024-11-10T12:15:50,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1370 2024-11-10T12:15:50,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1371 2024-11-10T12:15:50,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table870 2024-11-10T12:15:50,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table870) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1130 2024-11-10T12:15:50,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1372 2024-11-10T12:15:50,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table871 2024-11-10T12:15:50,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table871) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1131 2024-11-10T12:15:50,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1373 2024-11-10T12:15:50,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table630 2024-11-10T12:15:50,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table630) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table872 2024-11-10T12:15:50,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table872) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1132 2024-11-10T12:15:50,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1374 2024-11-10T12:15:50,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table631 2024-11-10T12:15:50,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table631) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table873 2024-11-10T12:15:50,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table873) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1133 2024-11-10T12:15:50,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1375 2024-11-10T12:15:50,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table632 2024-11-10T12:15:50,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table632) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table874 2024-11-10T12:15:50,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table874) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1134 2024-11-10T12:15:50,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1376 2024-11-10T12:15:50,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table633 2024-11-10T12:15:50,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table633) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table875 2024-11-10T12:15:50,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table875) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table634 2024-11-10T12:15:50,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table634) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table876 2024-11-10T12:15:50,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table876) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table635 2024-11-10T12:15:50,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table635) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table877 2024-11-10T12:15:50,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table877) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table636 2024-11-10T12:15:50,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table636) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table878 2024-11-10T12:15:50,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table878) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table637 2024-11-10T12:15:50,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table637) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table879 2024-11-10T12:15:50,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table879) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table638 2024-11-10T12:15:50,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table638) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table639 2024-11-10T12:15:50,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table639) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1135 2024-11-10T12:15:50,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1377 2024-11-10T12:15:50,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1136 2024-11-10T12:15:50,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1378 2024-11-10T12:15:50,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1137 2024-11-10T12:15:50,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1379 2024-11-10T12:15:50,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1138 2024-11-10T12:15:50,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1139 2024-11-10T12:15:50,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table660 2024-11-10T12:15:50,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table660) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1360 2024-11-10T12:15:50,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table661 2024-11-10T12:15:50,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table661) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1361 2024-11-10T12:15:50,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table420 2024-11-10T12:15:50,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table662 2024-11-10T12:15:50,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table662) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1120 2024-11-10T12:15:50,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1362 2024-11-10T12:15:50,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table421 2024-11-10T12:15:50,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table663 2024-11-10T12:15:50,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table663) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1121 2024-11-10T12:15:50,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1363 2024-11-10T12:15:50,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table422 2024-11-10T12:15:50,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table664 2024-11-10T12:15:50,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table664) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1122 2024-11-10T12:15:50,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1364 2024-11-10T12:15:50,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table423 2024-11-10T12:15:50,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table665 2024-11-10T12:15:50,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table665) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1123 2024-11-10T12:15:50,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1365 2024-11-10T12:15:50,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table424 2024-11-10T12:15:50,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table666 2024-11-10T12:15:50,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table666) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table425 2024-11-10T12:15:50,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table667 2024-11-10T12:15:50,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table667) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table426 2024-11-10T12:15:50,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table668 2024-11-10T12:15:50,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table668) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table427 2024-11-10T12:15:50,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table669 2024-11-10T12:15:50,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table669) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table428 2024-11-10T12:15:50,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table429 2024-11-10T12:15:50,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1124 2024-11-10T12:15:50,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1366 2024-11-10T12:15:50,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1125 2024-11-10T12:15:50,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1367 2024-11-10T12:15:50,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1126 2024-11-10T12:15:50,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1368 2024-11-10T12:15:50,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1127 2024-11-10T12:15:50,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1369 2024-11-10T12:15:50,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:50,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:50,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:50,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:50,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:50,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:50,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:50,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:50,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1128 2024-11-10T12:15:50,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1129 2024-11-10T12:15:51,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table890 2024-11-10T12:15:51,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table890) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table891 2024-11-10T12:15:51,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table891) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table650 2024-11-10T12:15:51,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table650) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table892 2024-11-10T12:15:51,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table892) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1350 2024-11-10T12:15:51,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table651 2024-11-10T12:15:51,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table651) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table893 2024-11-10T12:15:51,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table893) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1351 2024-11-10T12:15:51,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table410 2024-11-10T12:15:51,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table652 2024-11-10T12:15:51,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table652) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table894 2024-11-10T12:15:51,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table894) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1110 2024-11-10T12:15:51,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1352 2024-11-10T12:15:51,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table411 2024-11-10T12:15:51,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table653 2024-11-10T12:15:51,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table653) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table895 2024-11-10T12:15:51,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table895) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1111 2024-11-10T12:15:51,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1353 2024-11-10T12:15:51,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table412 2024-11-10T12:15:51,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table654 2024-11-10T12:15:51,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table654) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table896 2024-11-10T12:15:51,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table896) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1112 2024-11-10T12:15:51,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1354 2024-11-10T12:15:51,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table413 2024-11-10T12:15:51,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table655 2024-11-10T12:15:51,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table655) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table897 2024-11-10T12:15:51,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table897) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table414 2024-11-10T12:15:51,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table656 2024-11-10T12:15:51,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table656) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table898 2024-11-10T12:15:51,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table898) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table415 2024-11-10T12:15:51,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table657 2024-11-10T12:15:51,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table657) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table899 2024-11-10T12:15:51,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table899) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table416 2024-11-10T12:15:51,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table658 2024-11-10T12:15:51,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table658) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table417 2024-11-10T12:15:51,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table659 2024-11-10T12:15:51,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table659) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table418 2024-11-10T12:15:51,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table419 2024-11-10T12:15:51,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1113 2024-11-10T12:15:51,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1355 2024-11-10T12:15:51,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1114 2024-11-10T12:15:51,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1356 2024-11-10T12:15:51,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1115 2024-11-10T12:15:51,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1357 2024-11-10T12:15:51,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1116 2024-11-10T12:15:51,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1358 2024-11-10T12:15:51,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1117 2024-11-10T12:15:51,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1359 2024-11-10T12:15:51,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1118 2024-11-10T12:15:51,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1119 2024-11-10T12:15:51,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1182 2024-11-10T12:15:51,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1183 2024-11-10T12:15:51,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1184 2024-11-10T12:15:51,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1185 2024-11-10T12:15:51,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1186 2024-11-10T12:15:51,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1187 2024-11-10T12:15:51,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table840 2024-11-10T12:15:51,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table840) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1188 2024-11-10T12:15:51,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table841 2024-11-10T12:15:51,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table841) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1189 2024-11-10T12:15:51,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table600 2024-11-10T12:15:51,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table600) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table842 2024-11-10T12:15:51,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table842) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table601 2024-11-10T12:15:51,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table601) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table843 2024-11-10T12:15:51,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table843) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table602 2024-11-10T12:15:51,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table602) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table844 2024-11-10T12:15:51,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table844) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table603 2024-11-10T12:15:51,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table603) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table845 2024-11-10T12:15:51,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table845) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table604 2024-11-10T12:15:51,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table604) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table846 2024-11-10T12:15:51,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table846) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table605 2024-11-10T12:15:51,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table605) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table847 2024-11-10T12:15:51,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table847) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table606 2024-11-10T12:15:51,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table606) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table848 2024-11-10T12:15:51,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table848) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1180 2024-11-10T12:15:51,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table607 2024-11-10T12:15:51,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table607) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table849 2024-11-10T12:15:51,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table849) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1181 2024-11-10T12:15:51,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table608 2024-11-10T12:15:51,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table608) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table609 2024-11-10T12:15:51,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table609) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1171 2024-11-10T12:15:51,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1172 2024-11-10T12:15:51,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1173 2024-11-10T12:15:51,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1174 2024-11-10T12:15:51,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1175 2024-11-10T12:15:51,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1176 2024-11-10T12:15:51,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1177 2024-11-10T12:15:51,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table830 2024-11-10T12:15:51,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table830) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1178 2024-11-10T12:15:51,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table831 2024-11-10T12:15:51,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table831) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table832 2024-11-10T12:15:51,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table832) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table833 2024-11-10T12:15:51,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table833) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table834 2024-11-10T12:15:51,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table834) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table835 2024-11-10T12:15:51,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table835) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table836 2024-11-10T12:15:51,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table836) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table837 2024-11-10T12:15:51,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table837) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table838 2024-11-10T12:15:51,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table838) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1170 2024-11-10T12:15:51,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table839 2024-11-10T12:15:51,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table839) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1179 2024-11-10T12:15:51,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,052 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1160 2024-11-10T12:15:51,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,052 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1161 2024-11-10T12:15:51,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1162 2024-11-10T12:15:51,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1163 2024-11-10T12:15:51,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table860 2024-11-10T12:15:51,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table860) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1164 2024-11-10T12:15:51,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table861 2024-11-10T12:15:51,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table861) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1165 2024-11-10T12:15:51,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table620 2024-11-10T12:15:51,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table620) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table862 2024-11-10T12:15:51,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table862) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1166 2024-11-10T12:15:51,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table621 2024-11-10T12:15:51,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table621) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table863 2024-11-10T12:15:51,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table863) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1167 2024-11-10T12:15:51,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table622 2024-11-10T12:15:51,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table622) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table864 2024-11-10T12:15:51,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table864) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table623 2024-11-10T12:15:51,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table623) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table865 2024-11-10T12:15:51,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table865) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table624 2024-11-10T12:15:51,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table624) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table866 2024-11-10T12:15:51,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table866) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table625 2024-11-10T12:15:51,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table625) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table867 2024-11-10T12:15:51,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table867) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table626 2024-11-10T12:15:51,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table626) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table868 2024-11-10T12:15:51,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table868) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table627 2024-11-10T12:15:51,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table627) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table869 2024-11-10T12:15:51,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table869) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table628 2024-11-10T12:15:51,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table628) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table629 2024-11-10T12:15:51,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table629) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1168 2024-11-10T12:15:51,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1169 2024-11-10T12:15:51,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1391 2024-11-10T12:15:51,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1150 2024-11-10T12:15:51,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1392 2024-11-10T12:15:51,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1151 2024-11-10T12:15:51,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1393 2024-11-10T12:15:51,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1152 2024-11-10T12:15:51,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1394 2024-11-10T12:15:51,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1153 2024-11-10T12:15:51,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1395 2024-11-10T12:15:51,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table850 2024-11-10T12:15:51,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table850) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1154 2024-11-10T12:15:51,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1396 2024-11-10T12:15:51,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table851 2024-11-10T12:15:51,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table851) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1155 2024-11-10T12:15:51,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1397 2024-11-10T12:15:51,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table610 2024-11-10T12:15:51,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table610) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table852 2024-11-10T12:15:51,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table852) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1156 2024-11-10T12:15:51,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1398 2024-11-10T12:15:51,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table611 2024-11-10T12:15:51,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table611) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table853 2024-11-10T12:15:51,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table853) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table612 2024-11-10T12:15:51,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table612) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table854 2024-11-10T12:15:51,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table854) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table613 2024-11-10T12:15:51,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table613) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table855 2024-11-10T12:15:51,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table855) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table614 2024-11-10T12:15:51,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table614) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table856 2024-11-10T12:15:51,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table856) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table615 2024-11-10T12:15:51,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table615) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table857 2024-11-10T12:15:51,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table857) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table616 2024-11-10T12:15:51,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table616) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table858 2024-11-10T12:15:51,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table858) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table617 2024-11-10T12:15:51,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table617) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table859 2024-11-10T12:15:51,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table859) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table618 2024-11-10T12:15:51,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table618) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1390 2024-11-10T12:15:51,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table619 2024-11-10T12:15:51,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table619) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1157 2024-11-10T12:15:51,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1399 2024-11-10T12:15:51,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1158 2024-11-10T12:15:51,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1159 2024-11-10T12:15:51,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table240 2024-11-10T12:15:51,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table482 2024-11-10T12:15:51,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table241 2024-11-10T12:15:51,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table483 2024-11-10T12:15:51,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table242 2024-11-10T12:15:51,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table484 2024-11-10T12:15:51,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table243 2024-11-10T12:15:51,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table485 2024-11-10T12:15:51,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table244 2024-11-10T12:15:51,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table486 2024-11-10T12:15:51,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table245 2024-11-10T12:15:51,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table487 2024-11-10T12:15:51,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table246 2024-11-10T12:15:51,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table488 2024-11-10T12:15:51,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table247 2024-11-10T12:15:51,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table489 2024-11-10T12:15:51,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table248 2024-11-10T12:15:51,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table249 2024-11-10T12:15:51,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1308 2024-11-10T12:15:51,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1309 2024-11-10T12:15:51,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1300 2024-11-10T12:15:51,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1301 2024-11-10T12:15:51,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1302 2024-11-10T12:15:51,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1303 2024-11-10T12:15:51,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1304 2024-11-10T12:15:51,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table490 2024-11-10T12:15:51,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1305 2024-11-10T12:15:51,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table491 2024-11-10T12:15:51,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1306 2024-11-10T12:15:51,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table250 2024-11-10T12:15:51,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table492 2024-11-10T12:15:51,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1307 2024-11-10T12:15:51,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table471 2024-11-10T12:15:51,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table230 2024-11-10T12:15:51,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table472 2024-11-10T12:15:51,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table231 2024-11-10T12:15:51,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table473 2024-11-10T12:15:51,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table232 2024-11-10T12:15:51,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table474 2024-11-10T12:15:51,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table233 2024-11-10T12:15:51,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table475 2024-11-10T12:15:51,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table234 2024-11-10T12:15:51,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table476 2024-11-10T12:15:51,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table235 2024-11-10T12:15:51,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table477 2024-11-10T12:15:51,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table236 2024-11-10T12:15:51,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table478 2024-11-10T12:15:51,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table237 2024-11-10T12:15:51,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table479 2024-11-10T12:15:51,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table238 2024-11-10T12:15:51,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table239 2024-11-10T12:15:51,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table480 2024-11-10T12:15:51,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table481 2024-11-10T12:15:51,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table262 2024-11-10T12:15:51,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table263 2024-11-10T12:15:51,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table264 2024-11-10T12:15:51,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table265 2024-11-10T12:15:51,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table266 2024-11-10T12:15:51,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table267 2024-11-10T12:15:51,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table268 2024-11-10T12:15:51,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table269 2024-11-10T12:15:51,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table270 2024-11-10T12:15:51,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table271 2024-11-10T12:15:51,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table272 2024-11-10T12:15:51,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table251 2024-11-10T12:15:51,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table493 2024-11-10T12:15:51,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table252 2024-11-10T12:15:51,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table494 2024-11-10T12:15:51,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table253 2024-11-10T12:15:51,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table495 2024-11-10T12:15:51,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table254 2024-11-10T12:15:51,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table496 2024-11-10T12:15:51,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table255 2024-11-10T12:15:51,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table497 2024-11-10T12:15:51,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table256 2024-11-10T12:15:51,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table498 2024-11-10T12:15:51,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table257 2024-11-10T12:15:51,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table499 2024-11-10T12:15:51,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table258 2024-11-10T12:15:51,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table259 2024-11-10T12:15:51,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table260 2024-11-10T12:15:51,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table261 2024-11-10T12:15:51,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table680 2024-11-10T12:15:51,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table680) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table681 2024-11-10T12:15:51,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table681) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table440 2024-11-10T12:15:51,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table682 2024-11-10T12:15:51,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table682) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table441 2024-11-10T12:15:51,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table683 2024-11-10T12:15:51,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table683) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table200 2024-11-10T12:15:51,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table442 2024-11-10T12:15:51,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table684 2024-11-10T12:15:51,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table684) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1340 2024-11-10T12:15:51,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table201 2024-11-10T12:15:51,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table443 2024-11-10T12:15:51,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table685 2024-11-10T12:15:51,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table685) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1341 2024-11-10T12:15:51,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table202 2024-11-10T12:15:51,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table444 2024-11-10T12:15:51,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table686 2024-11-10T12:15:51,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table686) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1100 2024-11-10T12:15:51,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1342 2024-11-10T12:15:51,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table203 2024-11-10T12:15:51,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table445 2024-11-10T12:15:51,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table687 2024-11-10T12:15:51,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table687) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1101 2024-11-10T12:15:51,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1343 2024-11-10T12:15:51,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table204 2024-11-10T12:15:51,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table446 2024-11-10T12:15:51,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table688 2024-11-10T12:15:51,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table688) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table205 2024-11-10T12:15:51,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table447 2024-11-10T12:15:51,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table689 2024-11-10T12:15:51,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table689) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table206 2024-11-10T12:15:51,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table448 2024-11-10T12:15:51,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table207 2024-11-10T12:15:51,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table449 2024-11-10T12:15:51,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table208 2024-11-10T12:15:51,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table209 2024-11-10T12:15:51,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1102 2024-11-10T12:15:51,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1344 2024-11-10T12:15:51,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1103 2024-11-10T12:15:51,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1345 2024-11-10T12:15:51,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1104 2024-11-10T12:15:51,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1346 2024-11-10T12:15:51,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1105 2024-11-10T12:15:51,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1347 2024-11-10T12:15:51,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1106 2024-11-10T12:15:51,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1348 2024-11-10T12:15:51,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1107 2024-11-10T12:15:51,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1349 2024-11-10T12:15:51,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1108 2024-11-10T12:15:51,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table690 2024-11-10T12:15:51,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table690) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1109 2024-11-10T12:15:51,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table670 2024-11-10T12:15:51,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table670) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table671 2024-11-10T12:15:51,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table671) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table430 2024-11-10T12:15:51,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table672 2024-11-10T12:15:51,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table672) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table431 2024-11-10T12:15:51,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table673 2024-11-10T12:15:51,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table673) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table432 2024-11-10T12:15:51,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table674 2024-11-10T12:15:51,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table674) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1330 2024-11-10T12:15:51,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table433 2024-11-10T12:15:51,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table675 2024-11-10T12:15:51,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table675) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1331 2024-11-10T12:15:51,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table434 2024-11-10T12:15:51,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table676 2024-11-10T12:15:51,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table676) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1332 2024-11-10T12:15:51,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table435 2024-11-10T12:15:51,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table677 2024-11-10T12:15:51,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table677) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table436 2024-11-10T12:15:51,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table678 2024-11-10T12:15:51,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table678) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table437 2024-11-10T12:15:51,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table679 2024-11-10T12:15:51,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table679) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table438 2024-11-10T12:15:51,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table439 2024-11-10T12:15:51,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1333 2024-11-10T12:15:51,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1334 2024-11-10T12:15:51,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1335 2024-11-10T12:15:51,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1336 2024-11-10T12:15:51,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1337 2024-11-10T12:15:51,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1338 2024-11-10T12:15:51,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1339 2024-11-10T12:15:51,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table460 2024-11-10T12:15:51,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table461 2024-11-10T12:15:51,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table220 2024-11-10T12:15:51,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table462 2024-11-10T12:15:51,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table221 2024-11-10T12:15:51,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table463 2024-11-10T12:15:51,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table222 2024-11-10T12:15:51,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table464 2024-11-10T12:15:51,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table223 2024-11-10T12:15:51,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table465 2024-11-10T12:15:51,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table224 2024-11-10T12:15:51,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table466 2024-11-10T12:15:51,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1320 2024-11-10T12:15:51,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table225 2024-11-10T12:15:51,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table467 2024-11-10T12:15:51,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1321 2024-11-10T12:15:51,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table226 2024-11-10T12:15:51,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table468 2024-11-10T12:15:51,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table227 2024-11-10T12:15:51,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table469 2024-11-10T12:15:51,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table228 2024-11-10T12:15:51,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table229 2024-11-10T12:15:51,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1322 2024-11-10T12:15:51,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1323 2024-11-10T12:15:51,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1324 2024-11-10T12:15:51,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1325 2024-11-10T12:15:51,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1326 2024-11-10T12:15:51,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1327 2024-11-10T12:15:51,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1328 2024-11-10T12:15:51,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table470 2024-11-10T12:15:51,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1329 2024-11-10T12:15:51,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table691 2024-11-10T12:15:51,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table691) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table450 2024-11-10T12:15:51,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table692 2024-11-10T12:15:51,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table692) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table451 2024-11-10T12:15:51,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table693 2024-11-10T12:15:51,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table693) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table210 2024-11-10T12:15:51,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table452 2024-11-10T12:15:51,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table694 2024-11-10T12:15:51,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table694) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table211 2024-11-10T12:15:51,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table453 2024-11-10T12:15:51,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table695 2024-11-10T12:15:51,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table695) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table212 2024-11-10T12:15:51,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table454 2024-11-10T12:15:51,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table696 2024-11-10T12:15:51,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table696) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table213 2024-11-10T12:15:51,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table455 2024-11-10T12:15:51,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table697 2024-11-10T12:15:51,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table697) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table214 2024-11-10T12:15:51,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table456 2024-11-10T12:15:51,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table698 2024-11-10T12:15:51,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table698) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1310 2024-11-10T12:15:51,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table215 2024-11-10T12:15:51,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table457 2024-11-10T12:15:51,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table699 2024-11-10T12:15:51,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table699) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table216 2024-11-10T12:15:51,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table458 2024-11-10T12:15:51,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table217 2024-11-10T12:15:51,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table459 2024-11-10T12:15:51,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table218 2024-11-10T12:15:51,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table219 2024-11-10T12:15:51,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1319 2024-11-10T12:15:51,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1311 2024-11-10T12:15:51,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1312 2024-11-10T12:15:51,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1313 2024-11-10T12:15:51,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1314 2024-11-10T12:15:51,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1315 2024-11-10T12:15:51,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1316 2024-11-10T12:15:51,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1317 2024-11-10T12:15:51,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1318 2024-11-10T12:15:51,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table196 2024-11-10T12:15:51,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table197 2024-11-10T12:15:51,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table198 2024-11-10T12:15:51,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table199 2024-11-10T12:15:51,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table163 2024-11-10T12:15:51,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table164 2024-11-10T12:15:51,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table165 2024-11-10T12:15:51,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table166 2024-11-10T12:15:51,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table167 2024-11-10T12:15:51,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table168 2024-11-10T12:15:51,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table169 2024-11-10T12:15:51,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table170 2024-11-10T12:15:51,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table171 2024-11-10T12:15:51,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table172 2024-11-10T12:15:51,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table173 2024-11-10T12:15:51,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table152 2024-11-10T12:15:51,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table394 2024-11-10T12:15:51,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table153 2024-11-10T12:15:51,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table395 2024-11-10T12:15:51,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table154 2024-11-10T12:15:51,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table396 2024-11-10T12:15:51,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table155 2024-11-10T12:15:51,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table397 2024-11-10T12:15:51,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table156 2024-11-10T12:15:51,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table398 2024-11-10T12:15:51,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table157 2024-11-10T12:15:51,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table399 2024-11-10T12:15:51,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table158 2024-11-10T12:15:51,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table159 2024-11-10T12:15:51,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table160 2024-11-10T12:15:51,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table161 2024-11-10T12:15:51,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table162 2024-11-10T12:15:51,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table185 2024-11-10T12:15:51,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table186 2024-11-10T12:15:51,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table187 2024-11-10T12:15:51,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table188 2024-11-10T12:15:51,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table189 2024-11-10T12:15:51,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table190 2024-11-10T12:15:51,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table191 2024-11-10T12:15:51,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table192 2024-11-10T12:15:51,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table193 2024-11-10T12:15:51,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table194 2024-11-10T12:15:51,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table195 2024-11-10T12:15:51,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table174 2024-11-10T12:15:51,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table175 2024-11-10T12:15:51,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table176 2024-11-10T12:15:51,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table177 2024-11-10T12:15:51,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table178 2024-11-10T12:15:51,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table179 2024-11-10T12:15:51,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table180 2024-11-10T12:15:51,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table181 2024-11-10T12:15:51,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table182 2024-11-10T12:15:51,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table183 2024-11-10T12:15:51,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table184 2024-11-10T12:15:51,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table800 2024-11-10T12:15:51,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table800) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table801 2024-11-10T12:15:51,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table801) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table802 2024-11-10T12:15:51,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table802) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table803 2024-11-10T12:15:51,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table803) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table804 2024-11-10T12:15:51,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table804) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table805 2024-11-10T12:15:51,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table805) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table806 2024-11-10T12:15:51,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table806) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table807 2024-11-10T12:15:51,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table807) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table808 2024-11-10T12:15:51,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table808) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table809 2024-11-10T12:15:51,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table809) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table820 2024-11-10T12:15:51,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table820) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table821 2024-11-10T12:15:51,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table821) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table822 2024-11-10T12:15:51,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table822) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table823 2024-11-10T12:15:51,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table823) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table824 2024-11-10T12:15:51,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table824) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table825 2024-11-10T12:15:51,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table825) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table826 2024-11-10T12:15:51,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table826) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table827 2024-11-10T12:15:51,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table827) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table828 2024-11-10T12:15:51,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table828) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table829 2024-11-10T12:15:51,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table829) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1193 2024-11-10T12:15:51,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1194 2024-11-10T12:15:51,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1195 2024-11-10T12:15:51,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1196 2024-11-10T12:15:51,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1197 2024-11-10T12:15:51,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1198 2024-11-10T12:15:51,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1199 2024-11-10T12:15:51,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table810 2024-11-10T12:15:51,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table810) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table811 2024-11-10T12:15:51,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table811) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table812 2024-11-10T12:15:51,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table812) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table813 2024-11-10T12:15:51,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table813) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table814 2024-11-10T12:15:51,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table814) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1190 2024-11-10T12:15:51,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table815 2024-11-10T12:15:51,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table815) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1191 2024-11-10T12:15:51,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table816 2024-11-10T12:15:51,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table816) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1192 2024-11-10T12:15:51,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table817 2024-11-10T12:15:51,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table817) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table818 2024-11-10T12:15:51,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table818) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table819 2024-11-10T12:15:51,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table819) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1260 2024-11-10T12:15:51,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1261 2024-11-10T12:15:51,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table760 2024-11-10T12:15:51,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table760) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1020 2024-11-10T12:15:51,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1020) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1262 2024-11-10T12:15:51,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table761 2024-11-10T12:15:51,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table761) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1021 2024-11-10T12:15:51,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1021) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1263 2024-11-10T12:15:51,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table520 2024-11-10T12:15:51,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table520) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table762 2024-11-10T12:15:51,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table762) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1022 2024-11-10T12:15:51,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1022) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1264 2024-11-10T12:15:51,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table521 2024-11-10T12:15:51,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table521) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table763 2024-11-10T12:15:51,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table763) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1023 2024-11-10T12:15:51,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1023) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1265 2024-11-10T12:15:51,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table522 2024-11-10T12:15:51,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table522) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table764 2024-11-10T12:15:51,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table764) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1024 2024-11-10T12:15:51,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1024) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1266 2024-11-10T12:15:51,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table523 2024-11-10T12:15:51,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table523) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table765 2024-11-10T12:15:51,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table765) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table524 2024-11-10T12:15:51,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table524) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table766 2024-11-10T12:15:51,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table766) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table525 2024-11-10T12:15:51,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table525) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table767 2024-11-10T12:15:51,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table767) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table526 2024-11-10T12:15:51,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table526) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table768 2024-11-10T12:15:51,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table768) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table527 2024-11-10T12:15:51,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table527) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table769 2024-11-10T12:15:51,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table769) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table528 2024-11-10T12:15:51,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table528) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table529 2024-11-10T12:15:51,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table529) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-10T12:15:51,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-10T12:15:51,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-10T12:15:51,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-10T12:15:51,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1025 2024-11-10T12:15:51,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1025) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1267 2024-11-10T12:15:51,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-10T12:15:51,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1026 2024-11-10T12:15:51,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1026) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1268 2024-11-10T12:15:51,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-10T12:15:51,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1027 2024-11-10T12:15:51,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1027) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1269 2024-11-10T12:15:51,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-10T12:15:51,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1028 2024-11-10T12:15:51,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1028) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-10T12:15:51,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1029 2024-11-10T12:15:51,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1029) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-10T12:15:51,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-10T12:15:51,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table990 2024-11-10T12:15:51,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table990) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1250 2024-11-10T12:15:51,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table991 2024-11-10T12:15:51,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table991) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1251 2024-11-10T12:15:51,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table750 2024-11-10T12:15:51,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table750) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table992 2024-11-10T12:15:51,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table992) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1010 2024-11-10T12:15:51,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1010) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1252 2024-11-10T12:15:51,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table751 2024-11-10T12:15:51,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table751) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table993 2024-11-10T12:15:51,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table993) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1011 2024-11-10T12:15:51,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1011) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1253 2024-11-10T12:15:51,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table510 2024-11-10T12:15:51,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table510) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table752 2024-11-10T12:15:51,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table752) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table994 2024-11-10T12:15:51,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table994) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1012 2024-11-10T12:15:51,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1012) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1254 2024-11-10T12:15:51,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table511 2024-11-10T12:15:51,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table511) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table753 2024-11-10T12:15:51,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table753) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table995 2024-11-10T12:15:51,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table995) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1013 2024-11-10T12:15:51,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1013) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1255 2024-11-10T12:15:51,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table512 2024-11-10T12:15:51,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table512) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table754 2024-11-10T12:15:51,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table754) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table996 2024-11-10T12:15:51,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table996) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table513 2024-11-10T12:15:51,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table513) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table755 2024-11-10T12:15:51,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table755) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table997 2024-11-10T12:15:51,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table997) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table514 2024-11-10T12:15:51,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table514) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table756 2024-11-10T12:15:51,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table756) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table998 2024-11-10T12:15:51,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table998) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table515 2024-11-10T12:15:51,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table515) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table757 2024-11-10T12:15:51,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table757) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table999 2024-11-10T12:15:51,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table999) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table516 2024-11-10T12:15:51,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table516) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table758 2024-11-10T12:15:51,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table758) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table517 2024-11-10T12:15:51,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table517) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table759 2024-11-10T12:15:51,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table759) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table518 2024-11-10T12:15:51,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table518) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table519 2024-11-10T12:15:51,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table519) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-10T12:15:51,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-10T12:15:51,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-10T12:15:51,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1014 2024-11-10T12:15:51,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1014) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1256 2024-11-10T12:15:51,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-10T12:15:51,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1015 2024-11-10T12:15:51,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1015) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1257 2024-11-10T12:15:51,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-10T12:15:51,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1016 2024-11-10T12:15:51,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1016) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1258 2024-11-10T12:15:51,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-10T12:15:51,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1017 2024-11-10T12:15:51,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1017) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1259 2024-11-10T12:15:51,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-10T12:15:51,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1018 2024-11-10T12:15:51,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1018) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-10T12:15:51,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1019 2024-11-10T12:15:51,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1019) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-10T12:15:51,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-10T12:15:51,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-10T12:15:51,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table780 2024-11-10T12:15:51,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table780) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table781 2024-11-10T12:15:51,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table781) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table540 2024-11-10T12:15:51,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table540) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table782 2024-11-10T12:15:51,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table782) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1240 2024-11-10T12:15:51,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table541 2024-11-10T12:15:51,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table541) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table783 2024-11-10T12:15:51,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table783) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1241 2024-11-10T12:15:51,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table300 2024-11-10T12:15:51,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table542 2024-11-10T12:15:51,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table542) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table784 2024-11-10T12:15:51,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table784) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1000 2024-11-10T12:15:51,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1000) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1242 2024-11-10T12:15:51,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table301 2024-11-10T12:15:51,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table543 2024-11-10T12:15:51,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table543) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table785 2024-11-10T12:15:51,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table785) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1001 2024-11-10T12:15:51,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1001) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1243 2024-11-10T12:15:51,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table302 2024-11-10T12:15:51,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table544 2024-11-10T12:15:51,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table544) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table786 2024-11-10T12:15:51,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table786) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1002 2024-11-10T12:15:51,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1002) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1244 2024-11-10T12:15:51,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table303 2024-11-10T12:15:51,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table545 2024-11-10T12:15:51,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table545) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table787 2024-11-10T12:15:51,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table787) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table304 2024-11-10T12:15:51,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table546 2024-11-10T12:15:51,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table546) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table788 2024-11-10T12:15:51,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table788) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table305 2024-11-10T12:15:51,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table547 2024-11-10T12:15:51,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table547) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table789 2024-11-10T12:15:51,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table789) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table306 2024-11-10T12:15:51,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table548 2024-11-10T12:15:51,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table548) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table307 2024-11-10T12:15:51,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table549 2024-11-10T12:15:51,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table549) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table308 2024-11-10T12:15:51,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table309 2024-11-10T12:15:51,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-10T12:15:51,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-10T12:15:51,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1003 2024-11-10T12:15:51,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1003) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1245 2024-11-10T12:15:51,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-10T12:15:51,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1004 2024-11-10T12:15:51,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1004) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1246 2024-11-10T12:15:51,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-10T12:15:51,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1005 2024-11-10T12:15:51,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1005) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1247 2024-11-10T12:15:51,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-10T12:15:51,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1006 2024-11-10T12:15:51,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1006) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1248 2024-11-10T12:15:51,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-10T12:15:51,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1007 2024-11-10T12:15:51,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1007) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
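Each of these records ends with the same hint: lower hbase.master.balancer.stochastic.minCostNeedBalance or raise the multiplier of a specific cost function. A hedged sketch of what that tuning could look like in code follows; only the minCostNeedBalance key is named in the log itself, while the regionCountCost key and the class name BalancerTuningSketch are assumptions introduced here for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hedged sketch of the tuning the log message suggests. Only
// hbase.master.balancer.stochastic.minCostNeedBalance appears in the log; the
// regionCountCost key below is the commonly documented name for the
// RegionCountSkewCostFunction multiplier and is an assumption here.
public final class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Lower the threshold the weighted average imbalance is compared against
    // (this test runs with 1.0, so a plan is never generated).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // ...or give one cost function more relative weight (assumed key; 500 is the
    // multiplier shown for RegionCountSkewCostFunction above).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}

In a live cluster these values would normally be set in hbase-site.xml on the master rather than on a client-side Configuration as above; the in-code form only matters for embedded or test clusters started from that conf object.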
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1249 2024-11-10T12:15:51,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-10T12:15:51,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1008 2024-11-10T12:15:51,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1008) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-10T12:15:51,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1009 2024-11-10T12:15:51,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1009) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-10T12:15:51,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-10T12:15:51,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-10T12:15:51,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table770 2024-11-10T12:15:51,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table770) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table771 2024-11-10T12:15:51,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table771) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table530 2024-11-10T12:15:51,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table530) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table772 2024-11-10T12:15:51,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table772) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1230 2024-11-10T12:15:51,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table531 2024-11-10T12:15:51,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table531) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table773 2024-11-10T12:15:51,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table773) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1231 2024-11-10T12:15:51,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table532 2024-11-10T12:15:51,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table532) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table774 2024-11-10T12:15:51,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table774) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1232 2024-11-10T12:15:51,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table533 2024-11-10T12:15:51,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table533) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table775 2024-11-10T12:15:51,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table775) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1233 2024-11-10T12:15:51,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table534 2024-11-10T12:15:51,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table534) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table776 2024-11-10T12:15:51,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table776) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table535 2024-11-10T12:15:51,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table535) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table777 2024-11-10T12:15:51,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table777) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table536 2024-11-10T12:15:51,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table536) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table778 2024-11-10T12:15:51,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table778) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table537 2024-11-10T12:15:51,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table537) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table779 2024-11-10T12:15:51,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table779) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table538 2024-11-10T12:15:51,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table538) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table539 2024-11-10T12:15:51,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table539) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-10T12:15:51,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1234 2024-11-10T12:15:51,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-10T12:15:51,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1235 2024-11-10T12:15:51,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-10T12:15:51,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1236 2024-11-10T12:15:51,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-10T12:15:51,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1237 2024-11-10T12:15:51,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-10T12:15:51,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1238 2024-11-10T12:15:51,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-10T12:15:51,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1239 2024-11-10T12:15:51,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-10T12:15:51,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-10T12:15:51,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1061 2024-11-10T12:15:51,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1061) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1062 2024-11-10T12:15:51,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1062) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1063 2024-11-10T12:15:51,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1063) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1064 2024-11-10T12:15:51,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1064) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1065 2024-11-10T12:15:51,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1065) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table960 2024-11-10T12:15:51,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table960) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1066 2024-11-10T12:15:51,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1066) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table961 2024-11-10T12:15:51,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table961) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1067 2024-11-10T12:15:51,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1067) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table720 2024-11-10T12:15:51,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table720) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table962 2024-11-10T12:15:51,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table962) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1068 2024-11-10T12:15:51,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1068) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table721 2024-11-10T12:15:51,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table721) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table963 2024-11-10T12:15:51,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table963) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table722 2024-11-10T12:15:51,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table722) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table964 2024-11-10T12:15:51,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table964) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table723 2024-11-10T12:15:51,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table723) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table965 2024-11-10T12:15:51,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table965) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table724 2024-11-10T12:15:51,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table724) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table966 2024-11-10T12:15:51,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table966) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table725 2024-11-10T12:15:51,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table725) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table967 2024-11-10T12:15:51,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table967) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table726 2024-11-10T12:15:51,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table726) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table968 2024-11-10T12:15:51,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table968) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table727 2024-11-10T12:15:51,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table727) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table969 2024-11-10T12:15:51,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table969) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table728 2024-11-10T12:15:51,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table728) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1060 2024-11-10T12:15:51,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1060) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table729 2024-11-10T12:15:51,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table729) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1069 2024-11-10T12:15:51,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1069) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1050 2024-11-10T12:15:51,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1050) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1292 2024-11-10T12:15:51,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1051 2024-11-10T12:15:51,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1051) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1293 2024-11-10T12:15:51,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1052 2024-11-10T12:15:51,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1052) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1294 2024-11-10T12:15:51,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1053 2024-11-10T12:15:51,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1053) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1295 2024-11-10T12:15:51,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1054 2024-11-10T12:15:51,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1054) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1296 2024-11-10T12:15:51,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1055 2024-11-10T12:15:51,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1055) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1297 2024-11-10T12:15:51,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table950 2024-11-10T12:15:51,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table950) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1056 2024-11-10T12:15:51,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1056) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1298 2024-11-10T12:15:51,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table951 2024-11-10T12:15:51,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table951) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1057 2024-11-10T12:15:51,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1057) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1299 2024-11-10T12:15:51,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table710 2024-11-10T12:15:51,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table710) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table952 2024-11-10T12:15:51,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table952) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table711 2024-11-10T12:15:51,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table711) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table953 2024-11-10T12:15:51,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table953) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table712 2024-11-10T12:15:51,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table712) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table954 2024-11-10T12:15:51,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table954) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table713 2024-11-10T12:15:51,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table713) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table955 2024-11-10T12:15:51,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table955) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table714 2024-11-10T12:15:51,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table714) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table956 2024-11-10T12:15:51,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table956) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table715 2024-11-10T12:15:51,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table715) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table957 2024-11-10T12:15:51,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table957) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table716 2024-11-10T12:15:51,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table716) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table958 2024-11-10T12:15:51,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table958) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1290 2024-11-10T12:15:51,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table717 2024-11-10T12:15:51,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table717) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table959 2024-11-10T12:15:51,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table959) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1291 2024-11-10T12:15:51,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table718 2024-11-10T12:15:51,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table718) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table719 2024-11-10T12:15:51,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table719) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1058 2024-11-10T12:15:51,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1058) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1059 2024-11-10T12:15:51,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1059) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1281 2024-11-10T12:15:51,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1040 2024-11-10T12:15:51,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1040) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1282 2024-11-10T12:15:51,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1041 2024-11-10T12:15:51,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1041) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1283 2024-11-10T12:15:51,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table980 2024-11-10T12:15:51,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table980) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1042 2024-11-10T12:15:51,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1042) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1284 2024-11-10T12:15:51,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table981 2024-11-10T12:15:51,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table981) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1043 2024-11-10T12:15:51,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1043) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1285 2024-11-10T12:15:51,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table740 2024-11-10T12:15:51,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table740) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table982 2024-11-10T12:15:51,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table982) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1044 2024-11-10T12:15:51,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1044) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1286 2024-11-10T12:15:51,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table741 2024-11-10T12:15:51,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table741) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table983 2024-11-10T12:15:51,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table983) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1045 2024-11-10T12:15:51,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1045) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1287 2024-11-10T12:15:51,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table500 2024-11-10T12:15:51,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table500) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table742 2024-11-10T12:15:51,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table742) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table984 2024-11-10T12:15:51,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table984) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1046 2024-11-10T12:15:51,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1046) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1288 2024-11-10T12:15:51,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table501 2024-11-10T12:15:51,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table501) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table743 2024-11-10T12:15:51,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table743) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table985 2024-11-10T12:15:51,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table985) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table502 2024-11-10T12:15:51,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table502) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table744 2024-11-10T12:15:51,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table744) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table986 2024-11-10T12:15:51,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table986) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table503 2024-11-10T12:15:51,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table503) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table745 2024-11-10T12:15:51,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table745) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table987 2024-11-10T12:15:51,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table987) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table504 2024-11-10T12:15:51,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table504) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table746 2024-11-10T12:15:51,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table746) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table988 2024-11-10T12:15:51,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table988) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table505 2024-11-10T12:15:51,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table505) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table747 2024-11-10T12:15:51,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table747) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table989 2024-11-10T12:15:51,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table989) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table506 2024-11-10T12:15:51,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table506) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table748 2024-11-10T12:15:51,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table748) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table507 2024-11-10T12:15:51,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table507) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table749 2024-11-10T12:15:51,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table749) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table508 2024-11-10T12:15:51,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table508) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1280 2024-11-10T12:15:51,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table509 2024-11-10T12:15:51,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table509) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1047 2024-11-10T12:15:51,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1047) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1289 2024-11-10T12:15:51,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1048 2024-11-10T12:15:51,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1048) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1049 2024-11-10T12:15:51,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1049) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1270 2024-11-10T12:15:51,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1271 2024-11-10T12:15:51,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1030 2024-11-10T12:15:51,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1030) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1272 2024-11-10T12:15:51,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1031 2024-11-10T12:15:51,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1031) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1273 2024-11-10T12:15:51,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table970 2024-11-10T12:15:51,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table970) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1032 2024-11-10T12:15:51,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1032) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1274 2024-11-10T12:15:51,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table971 2024-11-10T12:15:51,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table971) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1033 2024-11-10T12:15:51,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1033) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1275 2024-11-10T12:15:51,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table730 2024-11-10T12:15:51,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table730) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table972 2024-11-10T12:15:51,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table972) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1034 2024-11-10T12:15:51,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1034) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1276 2024-11-10T12:15:51,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table731 2024-11-10T12:15:51,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table731) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table973 2024-11-10T12:15:51,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table973) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1035 2024-11-10T12:15:51,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1035) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1277 2024-11-10T12:15:51,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table732 2024-11-10T12:15:51,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table732) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table974 2024-11-10T12:15:51,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table974) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table733 2024-11-10T12:15:51,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table733) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table975 2024-11-10T12:15:51,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table975) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table734 2024-11-10T12:15:51,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table734) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table976 2024-11-10T12:15:51,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table976) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table735 2024-11-10T12:15:51,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table735) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table977 2024-11-10T12:15:51,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table977) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table736 2024-11-10T12:15:51,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table736) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table978 2024-11-10T12:15:51,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table978) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table737 2024-11-10T12:15:51,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table737) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table979 2024-11-10T12:15:51,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table979) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table738 2024-11-10T12:15:51,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table738) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table739 2024-11-10T12:15:51,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table739) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1036 2024-11-10T12:15:51,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1036) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1278 2024-11-10T12:15:51,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1037 2024-11-10T12:15:51,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1037) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1279 2024-11-10T12:15:51,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1038 2024-11-10T12:15:51,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1038) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1039 2024-11-10T12:15:51,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1039) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-10T12:15:51,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table361 2024-11-10T12:15:51,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-10T12:15:51,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-10T12:15:51,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table362 2024-11-10T12:15:51,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-10T12:15:51,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-10T12:15:51,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table363 2024-11-10T12:15:51,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-10T12:15:51,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-10T12:15:51,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table364 2024-11-10T12:15:51,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-10T12:15:51,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-10T12:15:51,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table365 2024-11-10T12:15:51,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-10T12:15:51,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-10T12:15:51,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table366 2024-11-10T12:15:51,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-10T12:15:51,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-10T12:15:51,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table367 2024-11-10T12:15:51,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-10T12:15:51,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table368 2024-11-10T12:15:51,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1420 2024-11-10T12:15:51,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-10T12:15:51,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table369 2024-11-10T12:15:51,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-10T12:15:51,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-10T12:15:51,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1429 2024-11-10T12:15:51,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1421 2024-11-10T12:15:51,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1422 2024-11-10T12:15:51,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1423 2024-11-10T12:15:51,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1424 2024-11-10T12:15:51,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1425 2024-11-10T12:15:51,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-10T12:15:51,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1426 2024-11-10T12:15:51,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-10T12:15:51,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table370 2024-11-10T12:15:51,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1427 2024-11-10T12:15:51,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-10T12:15:51,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table371 2024-11-10T12:15:51,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1428 2024-11-10T12:15:51,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table350 2024-11-10T12:15:51,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table592 2024-11-10T12:15:51,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table592) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table351 2024-11-10T12:15:51,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table593 2024-11-10T12:15:51,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table593) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-10T12:15:51,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table352 2024-11-10T12:15:51,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table594 2024-11-10T12:15:51,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table594) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-10T12:15:51,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table353 2024-11-10T12:15:51,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table595 2024-11-10T12:15:51,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table595) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-10T12:15:51,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table354 2024-11-10T12:15:51,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table596 2024-11-10T12:15:51,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,372 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table596) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-10T12:15:51,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,372 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table355 2024-11-10T12:15:51,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,372 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table597 2024-11-10T12:15:51,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table597) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-10T12:15:51,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table356 2024-11-10T12:15:51,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table598 2024-11-10T12:15:51,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table598) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-10T12:15:51,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table357 2024-11-10T12:15:51,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table599 2024-11-10T12:15:51,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table599) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-10T12:15:51,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table358 2024-11-10T12:15:51,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-10T12:15:51,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table359 2024-11-10T12:15:51,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-10T12:15:51,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-10T12:15:51,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1418 2024-11-10T12:15:51,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1419 2024-11-10T12:15:51,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1410 2024-11-10T12:15:51,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1411 2024-11-10T12:15:51,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1412 2024-11-10T12:15:51,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1413 2024-11-10T12:15:51,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1414 2024-11-10T12:15:51,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1415 2024-11-10T12:15:51,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1416 2024-11-10T12:15:51,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table360 2024-11-10T12:15:51,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1417 2024-11-10T12:15:51,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table141 2024-11-10T12:15:51,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table383 2024-11-10T12:15:51,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table142 2024-11-10T12:15:51,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table384 2024-11-10T12:15:51,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table143 2024-11-10T12:15:51,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table385 2024-11-10T12:15:51,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table144 2024-11-10T12:15:51,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table386 2024-11-10T12:15:51,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table145 2024-11-10T12:15:51,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table387 2024-11-10T12:15:51,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table146 2024-11-10T12:15:51,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table388 2024-11-10T12:15:51,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table147 2024-11-10T12:15:51,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table389 2024-11-10T12:15:51,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table148 2024-11-10T12:15:51,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table149 2024-11-10T12:15:51,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:51,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1407 2024-11-10T12:15:51,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:51,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1408 2024-11-10T12:15:51,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:51,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1409 2024-11-10T12:15:51,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:51,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:51,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:51,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1400 2024-11-10T12:15:51,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1401 2024-11-10T12:15:51,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1402 2024-11-10T12:15:51,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-10T12:15:51,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table390 2024-11-10T12:15:51,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1403 2024-11-10T12:15:51,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:51,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table391 2024-11-10T12:15:51,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1404 2024-11-10T12:15:51,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:51,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,394 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,394 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,394 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table150 2024-11-10T12:15:51,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,394 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,394 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,394 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table392 2024-11-10T12:15:51,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1405 2024-11-10T12:15:51,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:51,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table151 2024-11-10T12:15:51,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table393 2024-11-10T12:15:51,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1406 2024-11-10T12:15:51,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-10T12:15:51,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table372 2024-11-10T12:15:51,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-10T12:15:51,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table373 2024-11-10T12:15:51,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-10T12:15:51,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table374 2024-11-10T12:15:51,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-10T12:15:51,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table375 2024-11-10T12:15:51,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-10T12:15:51,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table376 2024-11-10T12:15:51,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-10T12:15:51,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table377 2024-11-10T12:15:51,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-10T12:15:51,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table378 2024-11-10T12:15:51,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-10T12:15:51,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table379 2024-11-10T12:15:51,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-10T12:15:51,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-10T12:15:51,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table380 2024-11-10T12:15:51,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table381 2024-11-10T12:15:51,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table140 2024-11-10T12:15:51,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table382 2024-11-10T12:15:51,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-10T12:15:51,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-10T12:15:51,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table560 2024-11-10T12:15:51,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table560) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-10T12:15:51,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table561 2024-11-10T12:15:51,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table561) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table320 2024-11-10T12:15:51,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table562 2024-11-10T12:15:51,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table562) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table321 2024-11-10T12:15:51,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table563 2024-11-10T12:15:51,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table563) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table322 2024-11-10T12:15:51,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table564 2024-11-10T12:15:51,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table564) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1220 2024-11-10T12:15:51,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table323 2024-11-10T12:15:51,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table565 2024-11-10T12:15:51,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table565) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1221 2024-11-10T12:15:51,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table324 2024-11-10T12:15:51,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table566 2024-11-10T12:15:51,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table566) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1222 2024-11-10T12:15:51,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table325 2024-11-10T12:15:51,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table567 2024-11-10T12:15:51,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table567) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table326 2024-11-10T12:15:51,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table568 2024-11-10T12:15:51,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table568) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table327 2024-11-10T12:15:51,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table569 2024-11-10T12:15:51,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table569) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table328 2024-11-10T12:15:51,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table329 2024-11-10T12:15:51,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1223 2024-11-10T12:15:51,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-10T12:15:51,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1224 2024-11-10T12:15:51,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-10T12:15:51,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1225 2024-11-10T12:15:51,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-10T12:15:51,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1226 2024-11-10T12:15:51,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-10T12:15:51,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1227 2024-11-10T12:15:51,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-10T12:15:51,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1228 2024-11-10T12:15:51,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-10T12:15:51,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1229 2024-11-10T12:15:51,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-10T12:15:51,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-10T12:15:51,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table790 2024-11-10T12:15:51,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table790) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-10T12:15:51,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table791 2024-11-10T12:15:51,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table791) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-10T12:15:51,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table550 2024-11-10T12:15:51,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table550) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table792 2024-11-10T12:15:51,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table792) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-10T12:15:51,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table551 2024-11-10T12:15:51,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table551) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table793 2024-11-10T12:15:51,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table793) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table310 2024-11-10T12:15:51,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table552 2024-11-10T12:15:51,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table552) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table794 2024-11-10T12:15:51,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table794) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table311 2024-11-10T12:15:51,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table553 2024-11-10T12:15:51,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table553) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table795 2024-11-10T12:15:51,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table795) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table312 2024-11-10T12:15:51,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table554 2024-11-10T12:15:51,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table554) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table796 2024-11-10T12:15:51,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table796) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1210 2024-11-10T12:15:51,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table313 2024-11-10T12:15:51,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table555 2024-11-10T12:15:51,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table555) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table797 2024-11-10T12:15:51,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table797) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1211 2024-11-10T12:15:51,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table314 2024-11-10T12:15:51,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table556 2024-11-10T12:15:51,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table556) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table798 2024-11-10T12:15:51,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table798) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table315 2024-11-10T12:15:51,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table557 2024-11-10T12:15:51,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table557) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table799 2024-11-10T12:15:51,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table799) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table316 2024-11-10T12:15:51,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table558 2024-11-10T12:15:51,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table558) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table317 2024-11-10T12:15:51,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table559 2024-11-10T12:15:51,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table559) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table318 2024-11-10T12:15:51,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table319 2024-11-10T12:15:51,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1212 2024-11-10T12:15:51,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1213 2024-11-10T12:15:51,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-10T12:15:51,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1214 2024-11-10T12:15:51,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-10T12:15:51,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1215 2024-11-10T12:15:51,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-10T12:15:51,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1216 2024-11-10T12:15:51,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-10T12:15:51,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1217 2024-11-10T12:15:51,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-10T12:15:51,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1218 2024-11-10T12:15:51,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-10T12:15:51,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1219 2024-11-10T12:15:51,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-10T12:15:51,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table581 2024-11-10T12:15:51,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table581) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-10T12:15:51,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table340 2024-11-10T12:15:51,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table582 2024-11-10T12:15:51,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table582) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-10T12:15:51,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table341 2024-11-10T12:15:51,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table583 2024-11-10T12:15:51,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table583) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-10T12:15:51,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-10T12:15:51,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table342 2024-11-10T12:15:51,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table584 2024-11-10T12:15:51,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table584) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-10T12:15:51,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-10T12:15:51,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table343 2024-11-10T12:15:51,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table585 2024-11-10T12:15:51,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table585) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-10T12:15:51,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table344 2024-11-10T12:15:51,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table586 2024-11-10T12:15:51,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table586) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-10T12:15:51,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table345 2024-11-10T12:15:51,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table587 2024-11-10T12:15:51,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table587) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-10T12:15:51,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table346 2024-11-10T12:15:51,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table588 2024-11-10T12:15:51,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table588) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1200 2024-11-10T12:15:51,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-10T12:15:51,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table347 2024-11-10T12:15:51,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table589 2024-11-10T12:15:51,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table589) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-10T12:15:51,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table348 2024-11-10T12:15:51,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-10T12:15:51,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table349 2024-11-10T12:15:51,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-10T12:15:51,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-10T12:15:51,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1209 2024-11-10T12:15:51,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1201 2024-11-10T12:15:51,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1202 2024-11-10T12:15:51,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1203 2024-11-10T12:15:51,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-10T12:15:51,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1204 2024-11-10T12:15:51,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-10T12:15:51,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1205 2024-11-10T12:15:51,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-10T12:15:51,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1206 2024-11-10T12:15:51,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-10T12:15:51,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table590 2024-11-10T12:15:51,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table590) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1207 2024-11-10T12:15:51,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-10T12:15:51,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table591 2024-11-10T12:15:51,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table591) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1208 2024-11-10T12:15:51,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-10T12:15:51,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table570 2024-11-10T12:15:51,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table570) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-10T12:15:51,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table571 2024-11-10T12:15:51,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table571) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-10T12:15:51,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table330 2024-11-10T12:15:51,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table572 2024-11-10T12:15:51,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table572) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-10T12:15:51,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table331 2024-11-10T12:15:51,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table573 2024-11-10T12:15:51,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table573) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-10T12:15:51,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table332 2024-11-10T12:15:51,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table574 2024-11-10T12:15:51,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table574) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-10T12:15:51,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table333 2024-11-10T12:15:51,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table575 2024-11-10T12:15:51,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table575) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table334 2024-11-10T12:15:51,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table576 2024-11-10T12:15:51,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table576) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1430 2024-11-10T12:15:51,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table335 2024-11-10T12:15:51,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table577 2024-11-10T12:15:51,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table577) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1431 2024-11-10T12:15:51,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table336 2024-11-10T12:15:51,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table578 2024-11-10T12:15:51,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table578) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table337 2024-11-10T12:15:51,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table579 2024-11-10T12:15:51,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table579) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table338 2024-11-10T12:15:51,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table339 2024-11-10T12:15:51,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-10T12:15:51,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-10T12:15:51,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-10T12:15:51,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-10T12:15:51,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table580 2024-11-10T12:15:51,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table580) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table284 2024-11-10T12:15:51,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table285 2024-11-10T12:15:51,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table286 2024-11-10T12:15:51,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table287 2024-11-10T12:15:51,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table288 2024-11-10T12:15:51,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table289 2024-11-10T12:15:51,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table290 2024-11-10T12:15:51,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,469 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table291 2024-11-10T12:15:51,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,469 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,469 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table292 2024-11-10T12:15:51,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,469 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,469 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table293 2024-11-10T12:15:51,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,469 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table294 2024-11-10T12:15:51,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table273 2024-11-10T12:15:51,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table274 2024-11-10T12:15:51,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table275 2024-11-10T12:15:51,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table276 2024-11-10T12:15:51,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table277 2024-11-10T12:15:51,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table278 2024-11-10T12:15:51,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table279 2024-11-10T12:15:51,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table280 2024-11-10T12:15:51,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table281 2024-11-10T12:15:51,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table282 2024-11-10T12:15:51,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table283 2024-11-10T12:15:51,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table295 2024-11-10T12:15:51,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table296 2024-11-10T12:15:51,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table297 2024-11-10T12:15:51,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table298 2024-11-10T12:15:51,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table299 2024-11-10T12:15:51,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table920 2024-11-10T12:15:51,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table920) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table921 2024-11-10T12:15:51,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table921) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table922 2024-11-10T12:15:51,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table922) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table923 2024-11-10T12:15:51,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table923) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table924 2024-11-10T12:15:51,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table924) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table925 2024-11-10T12:15:51,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table925) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table926 2024-11-10T12:15:51,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table926) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table927 2024-11-10T12:15:51,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table927) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table928 2024-11-10T12:15:51,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table928) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table929 2024-11-10T12:15:51,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,479 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,479 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table929) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,479 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1094 2024-11-10T12:15:51,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,479 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,479 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1094) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,479 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1095 2024-11-10T12:15:51,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,479 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1095) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1096 2024-11-10T12:15:51,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1096) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1097 2024-11-10T12:15:51,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1097) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1098 2024-11-10T12:15:51,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1098) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1099 2024-11-10T12:15:51,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1099) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table910 2024-11-10T12:15:51,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table910) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table911 2024-11-10T12:15:51,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table911) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table912 2024-11-10T12:15:51,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table912) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1090 2024-11-10T12:15:51,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1090) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table913 2024-11-10T12:15:51,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table913) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1091 2024-11-10T12:15:51,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1091) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table914 2024-11-10T12:15:51,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table914) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1092 2024-11-10T12:15:51,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1092) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table915 2024-11-10T12:15:51,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table915) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1093 2024-11-10T12:15:51,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1093) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table916 2024-11-10T12:15:51,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table916) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table917 2024-11-10T12:15:51,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table917) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table918 2024-11-10T12:15:51,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table918) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table919 2024-11-10T12:15:51,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table919) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1083 2024-11-10T12:15:51,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1083) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1084 2024-11-10T12:15:51,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1084) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1085 2024-11-10T12:15:51,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1085) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1086 2024-11-10T12:15:51,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1086) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1087 2024-11-10T12:15:51,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1087) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1088 2024-11-10T12:15:51,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1088) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1089 2024-11-10T12:15:51,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1089) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table940 2024-11-10T12:15:51,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table940) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table941 2024-11-10T12:15:51,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table941) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table700 2024-11-10T12:15:51,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table700) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table942 2024-11-10T12:15:51,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table942) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table701 2024-11-10T12:15:51,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table701) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table943 2024-11-10T12:15:51,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table943) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table702 2024-11-10T12:15:51,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table702) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table944 2024-11-10T12:15:51,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table944) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table703 2024-11-10T12:15:51,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table703) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table945 2024-11-10T12:15:51,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table945) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table704 2024-11-10T12:15:51,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table704) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table946 2024-11-10T12:15:51,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table946) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1080 2024-11-10T12:15:51,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1080) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table705 2024-11-10T12:15:51,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table705) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table947 2024-11-10T12:15:51,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table947) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1081 2024-11-10T12:15:51,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1081) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table706 2024-11-10T12:15:51,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table706) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table948 2024-11-10T12:15:51,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table948) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1082 2024-11-10T12:15:51,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1082) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table707 2024-11-10T12:15:51,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table707) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table949 2024-11-10T12:15:51,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table949) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table708 2024-11-10T12:15:51,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table708) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table709 2024-11-10T12:15:51,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table709) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1072 2024-11-10T12:15:51,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1072) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1073 2024-11-10T12:15:51,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1073) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1074 2024-11-10T12:15:51,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1074) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1075 2024-11-10T12:15:51,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1075) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1076 2024-11-10T12:15:51,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1076) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1077 2024-11-10T12:15:51,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1077) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1078 2024-11-10T12:15:51,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1078) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1079 2024-11-10T12:15:51,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1079) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table930 2024-11-10T12:15:51,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table930) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table931 2024-11-10T12:15:51,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table931) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table932 2024-11-10T12:15:51,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table932) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table933 2024-11-10T12:15:51,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table933) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table934 2024-11-10T12:15:51,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table934) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table935 2024-11-10T12:15:51,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table935) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table936 2024-11-10T12:15:51,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table936) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1070 2024-11-10T12:15:51,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1070) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table937 2024-11-10T12:15:51,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table937) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1071 2024-11-10T12:15:51,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1071) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table938 2024-11-10T12:15:51,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table938) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table939 2024-11-10T12:15:51,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table939) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table900 2024-11-10T12:15:51,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table900) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table901 2024-11-10T12:15:51,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table901) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table902 2024-11-10T12:15:51,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table902) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table903 2024-11-10T12:15:51,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table903) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table904 2024-11-10T12:15:51,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table904) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table905 2024-11-10T12:15:51,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table905) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table906 2024-11-10T12:15:51,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table906) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table907 2024-11-10T12:15:51,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table907) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table908 2024-11-10T12:15:51,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table908) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table909 2024-11-10T12:15:51,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802232629=0, srv819132341=1} racks are {rack=0} 2024-11-10T12:15:51,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table909) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-10T12:15:51,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-10T12:15:51,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-10T12:15:51,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-10T12:15:51,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-10T12:15:51,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-10T12:15:51,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-10T12:15:51,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-10T12:15:51,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-10T12:15:51,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-10T12:15:51,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-10T12:15:51,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-10T12:15:51,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-10T12:15:51,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-10T12:15:51,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-10T12:15:51,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-10T12:15:51,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-10T12:15:51,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-10T12:15:51,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-10T12:15:51,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-10T12:15:51,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-10T12:15:51,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-10T12:15:51,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-10T12:15:51,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-10T12:15:51,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-10T12:15:51,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-10T12:15:51,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-10T12:15:51,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-10T12:15:51,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:51,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:51,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:51,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:51,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:51,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,517 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:51,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,517 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-10T12:15:51,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,517 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-10T12:15:51,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-10T12:15:51,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:51,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-10T12:15:51,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:51,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-10T12:15:51,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,520 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,520 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,520 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:51,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,520 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,520 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,520 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-10T12:15:51,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,520 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-10T12:15:51,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-10T12:15:51,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-10T12:15:51,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-10T12:15:51,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-10T12:15:51,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-10T12:15:51,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-10T12:15:51,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-10T12:15:51,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-10T12:15:51,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-10T12:15:51,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1511192109=0, srv1865080135=1} racks are {rack=0} 2024-11-10T12:15:51,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:51,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:51,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323392715=2, srv1560244048=0, srv1868707892=1} racks are {rack=0} 2024-11-10T12:15:51,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:51,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv323392715=2, srv1560244048=0, srv1868707892=1} racks are {rack=0} 2024-11-10T12:15:51,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:51,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1809149922=1, srv2138465946=2, srv1714934639=0} racks are {rack=0} 2024-11-10T12:15:51,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:51,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1809149922=1, srv2138465946=2, srv1714934639=0} racks are {rack=0} 2024-11-10T12:15:51,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:51,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1809149922=1, srv2138465946=2, srv1714934639=0} racks are {rack=0} 2024-11-10T12:15:51,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:51,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv700239480=2, srv1895755143=0, srv612192179=1} racks are {rack=0} 2024-11-10T12:15:51,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:51,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv700239480=2, srv1895755143=0, srv612192179=1} racks are {rack=0} 2024-11-10T12:15:51,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:51,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1049570323=0, srv170367814=1, srv2131990610=2} racks are {rack=0} 2024-11-10T12:15:51,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:51,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1049570323=0, srv170367814=1, srv2131990610=2} racks are {rack=0} 2024-11-10T12:15:51,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:51,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1049570323=0, srv170367814=1, srv2131990610=2} racks are {rack=0} 2024-11-10T12:15:51,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:51,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1676900839=0, srv1762011161=1, srv392564078=2} racks are {rack=0} 2024-11-10T12:15:51,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:51,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1676900839=0, srv1762011161=1, srv392564078=2} racks are {rack=0} 2024-11-10T12:15:51,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:51,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1676900839=0, srv1762011161=1, srv392564078=2} racks are {rack=0} 2024-11-10T12:15:51,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:51,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1676900839=0, srv1762011161=1, srv392564078=2} racks are {rack=0} 2024-11-10T12:15:51,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-10T12:15:51,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1928514359=1, srv1670151478=0, srv999560515=2} racks are {rack=0} 2024-11-10T12:15:51,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-10T12:15:51,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1928514359=1, srv1670151478=0, srv999560515=2} racks are {rack=0} 2024-11-10T12:15:51,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,532 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,532 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,532 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-10T12:15:51,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1928514359=1, srv1670151478=0, srv999560515=2} racks are {rack=0} 2024-11-10T12:15:51,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,532 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,532 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,532 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-10T12:15:51,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1928514359=1, srv1670151478=0, srv999560515=2} racks are {rack=0} 2024-11-10T12:15:51,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,532 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,532 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,532 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-10T12:15:51,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1928514359=1, srv1670151478=0, srv999560515=2} racks are {rack=0} 2024-11-10T12:15:51,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,533 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-10T12:15:51,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1928514359=1, srv1670151478=0, srv999560515=2} racks are {rack=0} 2024-11-10T12:15:51,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,533 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-10T12:15:51,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1928514359=1, srv1670151478=0, srv999560515=2} racks are {rack=0} 2024-11-10T12:15:51,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,534 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,534 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,534 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:51,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1928514359=1, srv1670151478=0, srv999560515=2} racks are {rack=0} 2024-11-10T12:15:51,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,534 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,534 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,534 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:51,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1928514359=1, srv1670151478=0, srv999560515=2} racks are {rack=0} 2024-11-10T12:15:51,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,534 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,534 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,534 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:51,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1928514359=1, srv1670151478=0, srv999560515=2} racks are {rack=0} 2024-11-10T12:15:51,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,535 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,535 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:51,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1928514359=1, srv1670151478=0, srv999560515=2} racks are {rack=0} 2024-11-10T12:15:51,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,535 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,535 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:51,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1928514359=1, srv1670151478=0, srv999560515=2} racks are {rack=0} 2024-11-10T12:15:51,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,535 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,535 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:51,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1928514359=1, srv1670151478=0, srv999560515=2} racks are {rack=0} 2024-11-10T12:15:51,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,536 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,536 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,536 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-10T12:15:51,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1928514359=1, srv1670151478=0, srv999560515=2} racks are {rack=0} 2024-11-10T12:15:51,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,536 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,536 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,536 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:51,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1928514359=1, srv1670151478=0, srv999560515=2} racks are {rack=0} 2024-11-10T12:15:51,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,537 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-10T12:15:51,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1928514359=1, srv1670151478=0, srv999560515=2} racks are {rack=0} 2024-11-10T12:15:51,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,537 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:51,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1928514359=1, srv1670151478=0, srv999560515=2} racks are {rack=0} 2024-11-10T12:15:51,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,537 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-10T12:15:51,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1928514359=1, srv1670151478=0, srv999560515=2} racks are {rack=0} 2024-11-10T12:15:51,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,538 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,538 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:51,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1928514359=1, srv1670151478=0, srv999560515=2} racks are {rack=0} 2024-11-10T12:15:51,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,538 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,538 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-10T12:15:51,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1928514359=1, srv1670151478=0, srv999560515=2} racks are {rack=0} 2024-11-10T12:15:51,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-10T12:15:51,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
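Annotation (not part of the captured log): every "skipping load balancing" record above follows the same rule — the StochasticLoadBalancer weighs each cost function's imbalance by its multiplier and compares the weighted average against hbase.master.balancer.stochastic.minCostNeedBalance, reported here as threshold(1.0). The standalone Java sketch below reproduces that decision from the (multiplier, imbalance) pairs printed in the functionCost line; it is an illustration of the logged rule only, not HBase's actual implementation, and the class name is made up for this example.

// Illustrative sketch only -- mirrors the decision recorded in the log above,
// not HBase's real code. Cost functions reported as "(not needed)" are omitted.
import java.util.LinkedHashMap;
import java.util.Map;

public class BalanceSkipSketch {
    public static void main(String[] args) {
        // (multiplier, imbalance) pairs copied from the functionCost line above.
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[]{500.0, 0.0});
        costs.put("MoveCostFunction",            new double[]{7.0,   0.0});
        costs.put("RackLocalityCostFunction",    new double[]{15.0,  0.0});
        costs.put("TableSkewCostFunction",       new double[]{35.0,  0.0});
        costs.put("ReadRequestCostFunction",     new double[]{5.0,   0.0});
        costs.put("WriteRequestCostFunction",    new double[]{5.0,   0.0});
        costs.put("MemStoreSizeCostFunction",    new double[]{5.0,   0.0});
        costs.put("StoreFileCostFunction",       new double[]{5.0,   0.0});

        double minCostNeedBalance = 1.0; // threshold(1.0) reported in the log

        double weightedSum = 0.0;
        double weightTotal = 0.0;
        for (double[] c : costs.values()) {
            weightedSum += c[0] * c[1];  // multiplier * imbalance
            weightTotal += c[0];
        }
        double weightedAvgImbalance = weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;

        if (weightedAvgImbalance <= minCostNeedBalance) {
            // Matches the outcome logged for every table in this run: all
            // imbalances are 0.0, so no balance plan is generated.
            System.out.println("skipping load balancing: weighted average imbalance="
                    + weightedAvgImbalance + " <= threshold(" + minCostNeedBalance + ")");
        } else {
            System.out.println("would generate a balance plan");
        }
    }
}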
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:51,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv10810603=0, srv305111274=1, srv743580407=3, srv375651496=2} racks are {rack=0} 2024-11-10T12:15:51,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:51,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv10810603=0, srv305111274=1, srv743580407=3, srv375651496=2} racks are {rack=0} 2024-11-10T12:15:51,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:51,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv10810603=0, srv305111274=1, srv743580407=3, srv375651496=2} racks are {rack=0} 2024-11-10T12:15:51,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:51,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv715755921=3, srv2065322572=1, srv402796495=2, srv1607061980=0} racks are {rack=0} 2024-11-10T12:15:51,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:51,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv715755921=3, srv2065322572=1, srv402796495=2, srv1607061980=0} racks are {rack=0} 2024-11-10T12:15:51,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:51,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv715755921=3, srv2065322572=1, srv402796495=2, srv1607061980=0} racks are {rack=0} 2024-11-10T12:15:51,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:51,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv715755921=3, srv2065322572=1, srv402796495=2, srv1607061980=0} racks are {rack=0} 2024-11-10T12:15:51,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:51,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv620424833=2, srv236790934=1, srv957131931=3, srv1351883186=0} racks are {rack=0} 2024-11-10T12:15:51,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:51,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv620424833=2, srv236790934=1, srv957131931=3, srv1351883186=0} racks are {rack=0} 2024-11-10T12:15:51,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:51,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv620424833=2, srv236790934=1, srv957131931=3, srv1351883186=0} racks are {rack=0} 2024-11-10T12:15:51,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:51,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv620424833=2, srv236790934=1, srv957131931=3, srv1351883186=0} racks are {rack=0} 2024-11-10T12:15:51,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:51,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv620424833=2, srv236790934=1, srv957131931=3, srv1351883186=0} racks are {rack=0} 2024-11-10T12:15:51,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:51,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv219421051=1, srv1415762733=0, srv443118282=2, srv812980049=3} racks are {rack=0} 2024-11-10T12:15:51,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:51,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv219421051=1, srv1415762733=0, srv443118282=2, srv812980049=3} racks are {rack=0} 2024-11-10T12:15:51,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:51,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv219421051=1, srv1415762733=0, srv443118282=2, srv812980049=3} racks are {rack=0} 2024-11-10T12:15:51,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:51,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv219421051=1, srv1415762733=0, srv443118282=2, srv812980049=3} racks are {rack=0} 2024-11-10T12:15:51,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:51,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv219421051=1, srv1415762733=0, srv443118282=2, srv812980049=3} racks are {rack=0} 2024-11-10T12:15:51,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:51,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv219421051=1, srv1415762733=0, srv443118282=2, srv812980049=3} racks are {rack=0} 2024-11-10T12:15:51,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:51,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1358550953=0, srv1511999519=1, srv92117694=3, srv402960072=2} racks are {rack=0} 2024-11-10T12:15:51,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:51,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1358550953=0, srv1511999519=1, srv92117694=3, srv402960072=2} racks are {rack=0} 2024-11-10T12:15:51,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:51,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1358550953=0, srv1511999519=1, srv92117694=3, srv402960072=2} racks are {rack=0} 2024-11-10T12:15:51,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:51,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1358550953=0, srv1511999519=1, srv92117694=3, srv402960072=2} racks are {rack=0} 2024-11-10T12:15:51,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:51,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1358550953=0, srv1511999519=1, srv92117694=3, srv402960072=2} racks are {rack=0} 2024-11-10T12:15:51,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:51,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1358550953=0, srv1511999519=1, srv92117694=3, srv402960072=2} racks are {rack=0} 2024-11-10T12:15:51,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:51,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv326121145=3, srv1742535819=2, srv1426349465=0, srv1470944090=1} racks are {rack=0} 2024-11-10T12:15:51,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:51,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv326121145=3, srv1742535819=2, srv1426349465=0, srv1470944090=1} racks are {rack=0} 2024-11-10T12:15:51,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:51,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv326121145=3, srv1742535819=2, srv1426349465=0, srv1470944090=1} racks are {rack=0} 2024-11-10T12:15:51,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:51,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv326121145=3, srv1742535819=2, srv1426349465=0, srv1470944090=1} racks are {rack=0} 2024-11-10T12:15:51,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:51,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv326121145=3, srv1742535819=2, srv1426349465=0, srv1470944090=1} racks are {rack=0} 2024-11-10T12:15:51,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:51,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv326121145=3, srv1742535819=2, srv1426349465=0, srv1470944090=1} racks are {rack=0} 2024-11-10T12:15:51,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:51,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv713128205=2, srv470958852=1, srv1690348598=0, srv98235019=3} racks are {rack=0} 2024-11-10T12:15:51,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:51,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv713128205=2, srv470958852=1, srv1690348598=0, srv98235019=3} racks are {rack=0} 2024-11-10T12:15:51,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:51,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv713128205=2, srv470958852=1, srv1690348598=0, srv98235019=3} racks are {rack=0} 2024-11-10T12:15:51,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:51,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv713128205=2, srv470958852=1, srv1690348598=0, srv98235019=3} racks are {rack=0} 2024-11-10T12:15:51,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:51,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv713128205=2, srv470958852=1, srv1690348598=0, srv98235019=3} racks are {rack=0} 2024-11-10T12:15:51,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:51,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv713128205=2, srv470958852=1, srv1690348598=0, srv98235019=3} racks are {rack=0} 2024-11-10T12:15:51,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:51,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv545867470=2, srv211580794=1, srv2061936756=0, srv857379587=3} racks are {rack=0} 2024-11-10T12:15:51,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:51,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv545867470=2, srv211580794=1, srv2061936756=0, srv857379587=3} racks are {rack=0} 2024-11-10T12:15:51,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,557 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:51,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv545867470=2, srv211580794=1, srv2061936756=0, srv857379587=3} racks are {rack=0} 2024-11-10T12:15:51,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,557 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:51,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv545867470=2, srv211580794=1, srv2061936756=0, srv857379587=3} racks are {rack=0} 2024-11-10T12:15:51,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:51,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv545867470=2, srv211580794=1, srv2061936756=0, srv857379587=3} racks are {rack=0} 2024-11-10T12:15:51,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:51,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv545867470=2, srv211580794=1, srv2061936756=0, srv857379587=3} racks are {rack=0} 2024-11-10T12:15:51,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:51,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv545867470=2, srv211580794=1, srv2061936756=0, srv857379587=3} racks are {rack=0} 2024-11-10T12:15:51,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:51,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1543577303=0, srv854040932=3, srv16916033=1, srv281088656=2} racks are {rack=0} 2024-11-10T12:15:51,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:51,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1543577303=0, srv854040932=3, srv16916033=1, srv281088656=2} racks are {rack=0} 2024-11-10T12:15:51,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:51,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1543577303=0, srv854040932=3, srv16916033=1, srv281088656=2} racks are {rack=0} 2024-11-10T12:15:51,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:51,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1543577303=0, srv854040932=3, srv16916033=1, srv281088656=2} racks are {rack=0} 2024-11-10T12:15:51,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:51,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1543577303=0, srv854040932=3, srv16916033=1, srv281088656=2} racks are {rack=0} 2024-11-10T12:15:51,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:51,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1543577303=0, srv854040932=3, srv16916033=1, srv281088656=2} racks are {rack=0} 2024-11-10T12:15:51,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:51,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1543577303=0, srv854040932=3, srv16916033=1, srv281088656=2} racks are {rack=0} 2024-11-10T12:15:51,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:51,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1543577303=0, srv854040932=3, srv16916033=1, srv281088656=2} racks are {rack=0} 2024-11-10T12:15:51,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:51,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1864712293=3, srv1399051328=1, srv1386938226=0, srv1474083058=2} racks are {rack=0} 2024-11-10T12:15:51,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:51,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1864712293=3, srv1399051328=1, srv1386938226=0, srv1474083058=2} racks are {rack=0} 2024-11-10T12:15:51,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:51,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1864712293=3, srv1399051328=1, srv1386938226=0, srv1474083058=2} racks are {rack=0} 2024-11-10T12:15:51,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:51,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1864712293=3, srv1399051328=1, srv1386938226=0, srv1474083058=2} racks are {rack=0} 2024-11-10T12:15:51,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:51,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1864712293=3, srv1399051328=1, srv1386938226=0, srv1474083058=2} racks are {rack=0} 2024-11-10T12:15:51,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:51,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1864712293=3, srv1399051328=1, srv1386938226=0, srv1474083058=2} racks are {rack=0} 2024-11-10T12:15:51,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:51,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1864712293=3, srv1399051328=1, srv1386938226=0, srv1474083058=2} racks are {rack=0} 2024-11-10T12:15:51,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-10T12:15:51,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:51,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1157276339=0, srv555375169=4, srv443992856=3, srv1344617726=1, srv1664358041=2} racks are {rack=0} 2024-11-10T12:15:51,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-10T12:15:51,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:51,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1157276339=0, srv555375169=4, srv443992856=3, srv1344617726=1, srv1664358041=2} racks are {rack=0} 2024-11-10T12:15:51,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-10T12:15:51,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:51,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1157276339=0, srv555375169=4, srv443992856=3, srv1344617726=1, srv1664358041=2} racks are {rack=0} 2024-11-10T12:15:51,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-10T12:15:51,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,568 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:51,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1157276339=0, srv555375169=4, srv443992856=3, srv1344617726=1, srv1664358041=2} racks are {rack=0} 2024-11-10T12:15:51,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-10T12:15:51,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,608 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1380 2024-11-10T12:15:51,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,609 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,609 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,609 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1381 2024-11-10T12:15:51,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,609 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,609 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,609 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table880 2024-11-10T12:15:51,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,610 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,610 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table880) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,610 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1140 2024-11-10T12:15:51,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,610 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,610 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,610 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1382 2024-11-10T12:15:51,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,611 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,611 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,611 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table881 2024-11-10T12:15:51,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,611 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,611 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table881) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,611 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1141 2024-11-10T12:15:51,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,612 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,612 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,612 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1383 2024-11-10T12:15:51,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,612 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,612 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,612 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table640 2024-11-10T12:15:51,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table640) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,613 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table882 2024-11-10T12:15:51,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table882) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,614 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1142 2024-11-10T12:15:51,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,614 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1384 2024-11-10T12:15:51,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,615 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table641 2024-11-10T12:15:51,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,615 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table641) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table883 2024-11-10T12:15:51,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table883) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1143 2024-11-10T12:15:51,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1385 2024-11-10T12:15:51,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table400 2024-11-10T12:15:51,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table642 2024-11-10T12:15:51,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table642) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table884 2024-11-10T12:15:51,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table884) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1144 2024-11-10T12:15:51,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1386 2024-11-10T12:15:51,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table401 2024-11-10T12:15:51,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table643 2024-11-10T12:15:51,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table643) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table885 2024-11-10T12:15:51,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table885) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1145 2024-11-10T12:15:51,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1387 2024-11-10T12:15:51,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table402 2024-11-10T12:15:51,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table644 2024-11-10T12:15:51,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table644) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table886 2024-11-10T12:15:51,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table886) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table403 2024-11-10T12:15:51,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table645 2024-11-10T12:15:51,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table645) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table887 2024-11-10T12:15:51,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table887) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table404 2024-11-10T12:15:51,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table646 2024-11-10T12:15:51,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table646) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table888 2024-11-10T12:15:51,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table888) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table405 2024-11-10T12:15:51,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table647 2024-11-10T12:15:51,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table647) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table889 2024-11-10T12:15:51,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table889) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table406 2024-11-10T12:15:51,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table648 2024-11-10T12:15:51,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table648) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table407 2024-11-10T12:15:51,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table649 2024-11-10T12:15:51,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table649) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table408 2024-11-10T12:15:51,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table409 2024-11-10T12:15:51,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1146 2024-11-10T12:15:51,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1388 2024-11-10T12:15:51,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1147 2024-11-10T12:15:51,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1389 2024-11-10T12:15:51,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1148 2024-11-10T12:15:51,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1149 2024-11-10T12:15:51,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1370 2024-11-10T12:15:51,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1371 2024-11-10T12:15:51,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table870 2024-11-10T12:15:51,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table870) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1130 2024-11-10T12:15:51,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1372 2024-11-10T12:15:51,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table871 2024-11-10T12:15:51,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table871) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1131 2024-11-10T12:15:51,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1373 2024-11-10T12:15:51,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table630 2024-11-10T12:15:51,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table630) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table872 2024-11-10T12:15:51,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table872) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1132 2024-11-10T12:15:51,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1374 2024-11-10T12:15:51,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table631 2024-11-10T12:15:51,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table631) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table873 2024-11-10T12:15:51,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table873) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1133 2024-11-10T12:15:51,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1375 2024-11-10T12:15:51,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table632 2024-11-10T12:15:51,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table632) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table874 2024-11-10T12:15:51,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table874) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1134 2024-11-10T12:15:51,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1376 2024-11-10T12:15:51,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table633 2024-11-10T12:15:51,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table633) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table875 2024-11-10T12:15:51,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table875) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table634 2024-11-10T12:15:51,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table634) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table876 2024-11-10T12:15:51,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table876) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table635 2024-11-10T12:15:51,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table635) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table877 2024-11-10T12:15:51,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table877) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table636 2024-11-10T12:15:51,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table636) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
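The log message itself suggests lowering hbase.master.balancer.stochastic.minCostNeedBalance for more aggressive balancing. Below is a hedged sketch of setting that property through the Hadoop Configuration API; in practice it is usually set in hbase-site.xml on the master, and the value 0.05 here is an arbitrary illustration, not a recommendation.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: lower the balancer threshold named in the log message above.
public class LowerMinCostNeedBalance {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Property name taken verbatim from the log; 0.05f is a hypothetical value.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        System.out.println("minCostNeedBalance = "
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}
```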
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table878 2024-11-10T12:15:51,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table878) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table637 2024-11-10T12:15:51,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table637) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table879 2024-11-10T12:15:51,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table879) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table638 2024-11-10T12:15:51,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table638) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table639 2024-11-10T12:15:51,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table639) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1135 2024-11-10T12:15:51,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1377 2024-11-10T12:15:51,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
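The repeated BalancerClusterState lines describe how the balancer indexes servers onto hosts and racks before costing a plan: six distinct hosts, one rack, so server N maps to host N and rack 0. The snippet below is an illustration of that indexing using the server names from this test cluster, not the real BalancerClusterState implementation.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustration: rebuild the server -> host -> rack indices that the
// BalancerClusterState DEBUG/INFO lines above print for this test cluster.
public class ClusterIndexSketch {
    public static void main(String[] args) {
        String[] servers = {"srv1119392703", "srv1148487265", "srv2125714325",
                            "srv254482127", "srv493596571", "srv891047081"};
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        for (String s : servers) {
            hostIndex.putIfAbsent(s, hostIndex.size()); // each server is its own host here
        }
        int rackIndex = 0; // the whole test cluster sits on a single rack ("rack")
        for (int server = 0; server < servers.length; server++) {
            System.out.println("server " + server + " is on host " + hostIndex.get(servers[server]));
            System.out.println("server " + server + " is on rack " + rackIndex);
        }
    }
}
```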
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1136 2024-11-10T12:15:51,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1378 2024-11-10T12:15:51,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1137 2024-11-10T12:15:51,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1379 2024-11-10T12:15:51,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1138 2024-11-10T12:15:51,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
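Because every per-table block starts with the same functionCost summary, it can be convenient to pull the (cost function, multiplier, imbalance) triples out of these lines when inspecting a run. The parser below is a small standalone sketch keyed to the exact "Name : (multiplier=..., imbalance=...)" format shown above; "(not needed)" entries are intentionally skipped.

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Sketch: extract cost-function entries from a functionCost log line.
public class FunctionCostParser {
    private static final Pattern ENTRY = Pattern.compile(
        "(\\w+CostFunction) : \\(multiplier=([0-9.]+), imbalance=([0-9.]+)\\)");

    public static void main(String[] args) {
        String line = "functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); "
            + "MoveCostFunction : (multiplier=7.0, imbalance=0.0); "
            + "RegionReplicaHostCostFunction : (not needed);";
        Matcher m = ENTRY.matcher(line);
        while (m.find()) {
            System.out.printf("%s multiplier=%s imbalance=%s%n",
                m.group(1), m.group(2), m.group(3));
        }
    }
}
```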
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1139 2024-11-10T12:15:51,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table660 2024-11-10T12:15:51,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table660) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1360 2024-11-10T12:15:51,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table661 2024-11-10T12:15:51,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table661) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1361 2024-11-10T12:15:51,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table420 2024-11-10T12:15:51,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table662 2024-11-10T12:15:51,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table662) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1120 2024-11-10T12:15:51,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1362 2024-11-10T12:15:51,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table421 2024-11-10T12:15:51,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table663 2024-11-10T12:15:51,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table663) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1121 2024-11-10T12:15:51,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1363 2024-11-10T12:15:51,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table422 2024-11-10T12:15:51,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table664 2024-11-10T12:15:51,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table664) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1122 2024-11-10T12:15:51,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1364 2024-11-10T12:15:51,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table423 2024-11-10T12:15:51,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table665 2024-11-10T12:15:51,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table665) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1123 2024-11-10T12:15:51,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1365 2024-11-10T12:15:51,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table424 2024-11-10T12:15:51,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table666 2024-11-10T12:15:51,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table666) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table425 2024-11-10T12:15:51,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table667 2024-11-10T12:15:51,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table667) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table426 2024-11-10T12:15:51,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table668 2024-11-10T12:15:51,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table668) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table427 2024-11-10T12:15:51,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table669 2024-11-10T12:15:51,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table669) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table428 2024-11-10T12:15:51,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table429 2024-11-10T12:15:51,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1124 2024-11-10T12:15:51,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1366 2024-11-10T12:15:51,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1125 2024-11-10T12:15:51,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1367 2024-11-10T12:15:51,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1126 2024-11-10T12:15:51,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1368 2024-11-10T12:15:51,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1127 2024-11-10T12:15:51,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1369 2024-11-10T12:15:51,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1128 2024-11-10T12:15:51,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1129 2024-11-10T12:15:51,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table890 2024-11-10T12:15:51,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table890) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table891 2024-11-10T12:15:51,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table891) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table650 2024-11-10T12:15:51,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table650) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table892 2024-11-10T12:15:51,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table892) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1350 2024-11-10T12:15:51,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table651 2024-11-10T12:15:51,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table651) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table893 2024-11-10T12:15:51,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table893) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1351 2024-11-10T12:15:51,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table410 2024-11-10T12:15:51,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table652 2024-11-10T12:15:51,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table652) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table894 2024-11-10T12:15:51,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table894) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1110 2024-11-10T12:15:51,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1352 2024-11-10T12:15:51,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table411 2024-11-10T12:15:51,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table653 2024-11-10T12:15:51,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table653) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table895 2024-11-10T12:15:51,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table895) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1111 2024-11-10T12:15:51,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1353 2024-11-10T12:15:51,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table412 2024-11-10T12:15:51,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table654 2024-11-10T12:15:51,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table654) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table896 2024-11-10T12:15:51,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table896) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1112 2024-11-10T12:15:51,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1354 2024-11-10T12:15:51,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table413 2024-11-10T12:15:51,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table655 2024-11-10T12:15:51,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table655) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table897 2024-11-10T12:15:51,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table897) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table414 2024-11-10T12:15:51,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table656 2024-11-10T12:15:51,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table656) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table898 2024-11-10T12:15:51,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table898) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table415 2024-11-10T12:15:51,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table657 2024-11-10T12:15:51,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table657) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table899 2024-11-10T12:15:51,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table899) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table416 2024-11-10T12:15:51,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table658 2024-11-10T12:15:51,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table658) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table417 2024-11-10T12:15:51,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table659 2024-11-10T12:15:51,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table659) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table418 2024-11-10T12:15:51,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table419 2024-11-10T12:15:51,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1113 2024-11-10T12:15:51,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1355 2024-11-10T12:15:51,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1114 2024-11-10T12:15:51,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1356 2024-11-10T12:15:51,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1115 2024-11-10T12:15:51,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1357 2024-11-10T12:15:51,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1116 2024-11-10T12:15:51,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1358 2024-11-10T12:15:51,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1117 2024-11-10T12:15:51,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1359 2024-11-10T12:15:51,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1118 2024-11-10T12:15:51,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1119 2024-11-10T12:15:51,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1182 2024-11-10T12:15:51,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1183 2024-11-10T12:15:51,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1184 2024-11-10T12:15:51,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1185 2024-11-10T12:15:51,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1186 2024-11-10T12:15:51,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1187 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table840 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table840) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1188 2024-11-10T12:15:51,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table841 2024-11-10T12:15:51,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table841) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1189 2024-11-10T12:15:51,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table600 2024-11-10T12:15:51,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table600) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table842 2024-11-10T12:15:51,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table842) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table601 2024-11-10T12:15:51,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table601) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table843 2024-11-10T12:15:51,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table843) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table602 2024-11-10T12:15:51,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table602) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table844 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table844) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table603 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table603) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table845 2024-11-10T12:15:51,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table845) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table604 2024-11-10T12:15:51,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table604) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table846 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table846) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table605 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table605) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table847 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table847) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table606 2024-11-10T12:15:51,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table606) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table848 2024-11-10T12:15:51,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table848) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1180 2024-11-10T12:15:51,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table607 2024-11-10T12:15:51,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table607) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table849 2024-11-10T12:15:51,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table849) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1181 2024-11-10T12:15:51,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table608 2024-11-10T12:15:51,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table608) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table609 2024-11-10T12:15:51,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table609) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1171 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1172 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1173 2024-11-10T12:15:51,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1174 2024-11-10T12:15:51,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1175 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1176 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1177 2024-11-10T12:15:51,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table830 2024-11-10T12:15:51,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table830) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1178 2024-11-10T12:15:51,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table831 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table831) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table832 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table832) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table833 2024-11-10T12:15:51,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table833) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table834 2024-11-10T12:15:51,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table834) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table835 2024-11-10T12:15:51,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table835) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table836 2024-11-10T12:15:51,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table836) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table837 2024-11-10T12:15:51,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table837) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table838 2024-11-10T12:15:51,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table838) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1170 2024-11-10T12:15:51,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table839 2024-11-10T12:15:51,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table839) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1179 2024-11-10T12:15:51,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1160 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1161 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1162 2024-11-10T12:15:51,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1163 2024-11-10T12:15:51,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table860 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table860) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1164 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table861 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table861) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1165 2024-11-10T12:15:51,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table620 2024-11-10T12:15:51,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table620) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table862 2024-11-10T12:15:51,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table862) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1166 2024-11-10T12:15:51,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table621 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table621) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table863 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table863) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1167 2024-11-10T12:15:51,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table622 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table622) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table864 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table864) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table623 2024-11-10T12:15:51,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table623) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table865 2024-11-10T12:15:51,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table865) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table624 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table624) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table866 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table866) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table625 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table625) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table867 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table867) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table626 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table626) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table868 2024-11-10T12:15:51,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table868) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table627 2024-11-10T12:15:51,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table627) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table869 2024-11-10T12:15:51,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table869) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table628 2024-11-10T12:15:51,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table628) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table629 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table629) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1168 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1169 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1391 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1150 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1392 2024-11-10T12:15:51,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1151 2024-11-10T12:15:51,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1393 2024-11-10T12:15:51,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1152 2024-11-10T12:15:51,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1394 2024-11-10T12:15:51,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1153 2024-11-10T12:15:51,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1395 2024-11-10T12:15:51,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table850 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table850) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1154 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1396 2024-11-10T12:15:51,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table851 2024-11-10T12:15:51,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table851) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1155 2024-11-10T12:15:51,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1397 2024-11-10T12:15:51,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table610 2024-11-10T12:15:51,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table610) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table852 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table852) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1156 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1398 2024-11-10T12:15:51,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table611 2024-11-10T12:15:51,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table611) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table853 2024-11-10T12:15:51,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table853) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table612 2024-11-10T12:15:51,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table612) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table854 2024-11-10T12:15:51,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table854) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table613 2024-11-10T12:15:51,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table613) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table855 2024-11-10T12:15:51,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table855) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table614 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table614) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table856 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table856) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table615 2024-11-10T12:15:51,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table615) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table857 2024-11-10T12:15:51,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table857) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table616 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table616) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table858 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table858) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table617 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table617) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table859 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table859) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table618 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table618) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1390 2024-11-10T12:15:51,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table619 2024-11-10T12:15:51,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table619) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1157 2024-11-10T12:15:51,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1399 2024-11-10T12:15:51,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1158 2024-11-10T12:15:51,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1159 2024-11-10T12:15:51,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table240 2024-11-10T12:15:51,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table482 2024-11-10T12:15:51,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table241 2024-11-10T12:15:51,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table483 2024-11-10T12:15:51,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table242 2024-11-10T12:15:51,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table484 2024-11-10T12:15:51,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table243 2024-11-10T12:15:51,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table485 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table244 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table486 2024-11-10T12:15:51,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table245 2024-11-10T12:15:51,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table487 2024-11-10T12:15:51,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table246 2024-11-10T12:15:51,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table488 2024-11-10T12:15:51,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table247 2024-11-10T12:15:51,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table489 2024-11-10T12:15:51,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table248 2024-11-10T12:15:51,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table249 2024-11-10T12:15:51,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1308 2024-11-10T12:15:51,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1309 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1300 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1301 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1302 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
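Every table in this run is skipped with the same hint: lower hbase.master.balancer.stochastic.minCostNeedBalance or raise the multiplier of the cost function you care about. A hedged example of doing that programmatically follows; the minCostNeedBalance key is quoted verbatim in the log, while the per-function multiplier key used here is an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();

            // Key named directly in the log; 1.0 is the threshold seen in this test.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

            // Assumed key for the RegionCountSkewCostFunction multiplier (logged as 500.0);
            // raising it makes region-count skew dominate the weighted average.
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

            System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }

Lowering the threshold makes the balancer act on smaller imbalances; raising a single multiplier shifts the weighted average toward that cost function.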
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1303 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1304 2024-11-10T12:15:51,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table490 2024-11-10T12:15:51,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1305 2024-11-10T12:15:51,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table491 2024-11-10T12:15:51,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1306 2024-11-10T12:15:51,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table250 2024-11-10T12:15:51,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table492 2024-11-10T12:15:51,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1307 2024-11-10T12:15:51,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table471 2024-11-10T12:15:51,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table230 2024-11-10T12:15:51,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table472 2024-11-10T12:15:51,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table231 2024-11-10T12:15:51,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table473 2024-11-10T12:15:51,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table232 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table474 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table233 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table475 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table234 2024-11-10T12:15:51,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table476 2024-11-10T12:15:51,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table235 2024-11-10T12:15:51,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table477 2024-11-10T12:15:51,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table236 2024-11-10T12:15:51,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table478 2024-11-10T12:15:51,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table237 2024-11-10T12:15:51,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table479 2024-11-10T12:15:51,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table238 2024-11-10T12:15:51,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table239 2024-11-10T12:15:51,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table480 2024-11-10T12:15:51,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table481 2024-11-10T12:15:51,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table262 2024-11-10T12:15:51,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table263 2024-11-10T12:15:51,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table264 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table265 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table266 2024-11-10T12:15:51,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table267 2024-11-10T12:15:51,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table268 2024-11-10T12:15:51,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table269 2024-11-10T12:15:51,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table270 2024-11-10T12:15:51,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table271 2024-11-10T12:15:51,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table272 2024-11-10T12:15:51,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table251 2024-11-10T12:15:51,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table493 2024-11-10T12:15:51,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table252 2024-11-10T12:15:51,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table494 2024-11-10T12:15:51,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table253 2024-11-10T12:15:51,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table495 2024-11-10T12:15:51,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table254 2024-11-10T12:15:51,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table496 2024-11-10T12:15:51,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table255 2024-11-10T12:15:51,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table497 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table256 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table498 2024-11-10T12:15:51,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table257 2024-11-10T12:15:51,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table499 2024-11-10T12:15:51,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table258 2024-11-10T12:15:51,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table259 2024-11-10T12:15:51,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table260 2024-11-10T12:15:51,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table261 2024-11-10T12:15:51,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table680 2024-11-10T12:15:51,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table680) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table681 2024-11-10T12:15:51,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table681) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table440 2024-11-10T12:15:51,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table682 2024-11-10T12:15:51,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table682) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table441 2024-11-10T12:15:51,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table683 2024-11-10T12:15:51,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table683) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table200 2024-11-10T12:15:51,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table442 2024-11-10T12:15:51,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table684 2024-11-10T12:15:51,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table684) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1340 2024-11-10T12:15:51,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table201 2024-11-10T12:15:51,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table443 2024-11-10T12:15:51,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table685 2024-11-10T12:15:51,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table685) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1341 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table202 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table444 2024-11-10T12:15:51,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table686 2024-11-10T12:15:51,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table686) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1100 2024-11-10T12:15:51,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1342 2024-11-10T12:15:51,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table203 2024-11-10T12:15:51,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table445 2024-11-10T12:15:51,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table687 2024-11-10T12:15:51,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table687) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1101 2024-11-10T12:15:51,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1343 2024-11-10T12:15:51,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table204 2024-11-10T12:15:51,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table446 2024-11-10T12:15:51,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table688 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table688) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table205 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table447 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table689 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table689) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table206 2024-11-10T12:15:51,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table448 2024-11-10T12:15:51,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table207 2024-11-10T12:15:51,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table449 2024-11-10T12:15:51,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table208 2024-11-10T12:15:51,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table209 2024-11-10T12:15:51,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1102 2024-11-10T12:15:51,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1344 2024-11-10T12:15:51,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1103 2024-11-10T12:15:51,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1345 2024-11-10T12:15:51,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1104 2024-11-10T12:15:51,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1346 2024-11-10T12:15:51,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1105 2024-11-10T12:15:51,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1347 2024-11-10T12:15:51,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1106 2024-11-10T12:15:51,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1348 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1107 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1349 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1108 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table690 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table690) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1109 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table670 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table670) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table671 2024-11-10T12:15:51,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table671) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table430 2024-11-10T12:15:51,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table672 2024-11-10T12:15:51,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table672) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table431 2024-11-10T12:15:51,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table673 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table673) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table432 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table674 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table674) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1330 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table433 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table675 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table675) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1331 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table434 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table676 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table676) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1332 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table435 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table677 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table677) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table436 2024-11-10T12:15:51,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table678 2024-11-10T12:15:51,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table678) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table437 2024-11-10T12:15:51,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table679 2024-11-10T12:15:51,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table679) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table438 2024-11-10T12:15:51,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table439 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1333 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1334 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1335 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1336 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1337 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1338 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1339 2024-11-10T12:15:51,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table460 2024-11-10T12:15:51,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table461 2024-11-10T12:15:51,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table220 2024-11-10T12:15:51,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table462 2024-11-10T12:15:51,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table221 2024-11-10T12:15:51,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table463 2024-11-10T12:15:51,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table222 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table464 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table223 2024-11-10T12:15:51,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table465 2024-11-10T12:15:51,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table224 2024-11-10T12:15:51,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table466 2024-11-10T12:15:51,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1320 2024-11-10T12:15:51,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table225 2024-11-10T12:15:51,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table467 2024-11-10T12:15:51,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1321 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table226 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table468 2024-11-10T12:15:51,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table227 2024-11-10T12:15:51,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table469 2024-11-10T12:15:51,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table228 2024-11-10T12:15:51,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table229 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1322 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1323 2024-11-10T12:15:51,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1324 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1325 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1326 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1327 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1328 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table470 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1329 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table691 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table691) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table450 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table692 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table692) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table451 2024-11-10T12:15:51,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table693 2024-11-10T12:15:51,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table693) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table210 2024-11-10T12:15:51,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table452 2024-11-10T12:15:51,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table694 2024-11-10T12:15:51,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table694) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table211 2024-11-10T12:15:51,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table453 2024-11-10T12:15:51,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table695 2024-11-10T12:15:51,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table695) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table212 2024-11-10T12:15:51,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table454 2024-11-10T12:15:51,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table696 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table696) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table213 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table455 2024-11-10T12:15:51,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table697 2024-11-10T12:15:51,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table697) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table214 2024-11-10T12:15:51,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table456 2024-11-10T12:15:51,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table698 2024-11-10T12:15:51,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table698) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1310 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table215 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table457 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table699 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table699) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table216 2024-11-10T12:15:51,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table458 2024-11-10T12:15:51,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table217 2024-11-10T12:15:51,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table459 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table218 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table219 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
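
[Editor's note] The BalancerClusterState lines above map the six region servers onto host indices 0–5 and a single rack index 0 before each plan is generated. The sketch below shows one way such an index assignment can be derived from the server names printed in the log; it is an illustration only, and the class name, ordering, and output format are assumptions rather than the balancer's actual code.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public final class ClusterIndexingExample {
    public static void main(String[] args) {
        // Server names taken from the "Hosts are {...}" lines above; in this test
        // topology every server is its own host and all servers share one rack.
        List<String> servers = List.of(
            "srv1119392703", "srv1148487265", "srv2125714325",
            "srv254482127", "srv493596571", "srv891047081");

        // Assign each distinct hostname the next free host index (0, 1, 2, ...).
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        for (String server : servers) {
            hostIndex.putIfAbsent(server, hostIndex.size());
        }

        hostIndex.forEach((host, idx) ->
            System.out.println("server " + idx + " is on host " + idx + " (" + host + ")"));
        System.out.println("Number of tables=1, number of hosts=" + hostIndex.size()
            + ", number of racks=1");
    }
}
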
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1319 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1311 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1312 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1313 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1314 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1315 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1316 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1317 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1318 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table196 2024-11-10T12:15:51,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table197 2024-11-10T12:15:51,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table198 2024-11-10T12:15:51,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table199 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table163 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table164 2024-11-10T12:15:51,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table165 2024-11-10T12:15:51,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table166 2024-11-10T12:15:51,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table167 2024-11-10T12:15:51,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table168 2024-11-10T12:15:51,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table169 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table170 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table171 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table172 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table173 2024-11-10T12:15:51,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table152 2024-11-10T12:15:51,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table394 2024-11-10T12:15:51,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table153 2024-11-10T12:15:51,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table395 2024-11-10T12:15:51,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table154 2024-11-10T12:15:51,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table396 2024-11-10T12:15:51,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table155 2024-11-10T12:15:51,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table397 2024-11-10T12:15:51,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table156 2024-11-10T12:15:51,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table398 2024-11-10T12:15:51,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table157 2024-11-10T12:15:51,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table399 2024-11-10T12:15:51,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table158 2024-11-10T12:15:51,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table159 2024-11-10T12:15:51,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table160 2024-11-10T12:15:51,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table161 2024-11-10T12:15:51,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table162 2024-11-10T12:15:51,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table185 2024-11-10T12:15:51,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table186 2024-11-10T12:15:51,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table187 2024-11-10T12:15:51,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table188 2024-11-10T12:15:51,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table189 2024-11-10T12:15:51,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table190 2024-11-10T12:15:51,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table191 2024-11-10T12:15:51,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table192 2024-11-10T12:15:51,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table193 2024-11-10T12:15:51,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table194 2024-11-10T12:15:51,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table195 2024-11-10T12:15:51,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table174 2024-11-10T12:15:51,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table175 2024-11-10T12:15:51,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table176 2024-11-10T12:15:51,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table177 2024-11-10T12:15:51,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table178 2024-11-10T12:15:51,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table179 2024-11-10T12:15:51,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table180 2024-11-10T12:15:51,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table181 2024-11-10T12:15:51,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table182 2024-11-10T12:15:51,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table183 2024-11-10T12:15:51,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table184 2024-11-10T12:15:51,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table800 2024-11-10T12:15:51,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table800) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table801 2024-11-10T12:15:51,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table801) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table802 2024-11-10T12:15:51,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table802) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table803 2024-11-10T12:15:51,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table803) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table804 2024-11-10T12:15:51,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table804) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table805 2024-11-10T12:15:51,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table805) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table806 2024-11-10T12:15:51,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table806) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table807 2024-11-10T12:15:51,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table807) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table808 2024-11-10T12:15:51,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table808) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table809 2024-11-10T12:15:51,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table809) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table820 2024-11-10T12:15:51,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table820) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table821 2024-11-10T12:15:51,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table821) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table822 2024-11-10T12:15:51,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table822) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table823 2024-11-10T12:15:51,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table823) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table824 2024-11-10T12:15:51,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table824) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table825 2024-11-10T12:15:51,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table825) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table826 2024-11-10T12:15:51,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table826) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table827 2024-11-10T12:15:51,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table827) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table828 2024-11-10T12:15:51,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table828) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table829 2024-11-10T12:15:51,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table829) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1193 2024-11-10T12:15:51,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1194 2024-11-10T12:15:51,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1195 2024-11-10T12:15:51,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1196 2024-11-10T12:15:51,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1197 2024-11-10T12:15:51,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1198 2024-11-10T12:15:51,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1199 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table810 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table810) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table811 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table811) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table812 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table812) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table813 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table813) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table814 2024-11-10T12:15:51,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table814) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1190 2024-11-10T12:15:51,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table815 2024-11-10T12:15:51,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table815) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
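The BalancerClusterState DEBUG/INFO lines above map each server onto host and rack indices before any cost is evaluated (six servers, one host each, all on a single rack). The sketch below is a hypothetical reconstruction of that bookkeeping for the topology in this test; class and variable names are illustrative and are not HBase's internal data structures.

// Illustrative sketch: index maps matching the "Hosts are {...} racks are {rack=0}"
// and "server N is on host N / rack 0" lines in the log above.
import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterIndexSketch {
    public static void main(String[] args) {
        // server names as they appear in the log
        String[] servers = {
            "srv1119392703", "srv1148487265", "srv2125714325",
            "srv254482127", "srv493596571", "srv891047081"
        };

        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        Map<String, Integer> rackIndex = new LinkedHashMap<>();
        rackIndex.put("rack", 0); // single-rack test topology

        for (int server = 0; server < servers.length; server++) {
            hostIndex.put(servers[server], server); // one host per server here
            System.out.printf("server %d is on host %d%n", server, server);
            System.out.printf("server %d is on rack %d%n", server, rackIndex.get("rack"));
        }
        System.out.printf("Number of tables=1, number of hosts=%d, number of racks=%d%n",
            hostIndex.size(), rackIndex.size());
    }
}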
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1191 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table816 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table816) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1192 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table817 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table817) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table818 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table818) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table819 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table819) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1260 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1261 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table760 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table760) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1020 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1020) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1262 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table761 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table761) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1021 2024-11-10T12:15:51,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1021) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1263 2024-11-10T12:15:51,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table520 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table520) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table762 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table762) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1022 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1022) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1264 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table521 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table521) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table763 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table763) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1023 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1023) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1265 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table522 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table522) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table764 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table764) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1024 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1024) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1266 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table523 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table523) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table765 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table765) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table524 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table524) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table766 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table766) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table525 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table525) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table767 2024-11-10T12:15:51,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table767) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table526 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table526) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table768 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table768) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table527 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table527) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table769 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table769) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table528 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table528) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table529 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table529) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-10T12:15:51,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1025 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1025) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1267 2024-11-10T12:15:51,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-10T12:15:51,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1026 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1026) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1268 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1027 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1027) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1269 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1028 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1028) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1029 2024-11-10T12:15:51,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1029) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1490 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1491 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table990 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table990) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1250 2024-11-10T12:15:51,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1492 2024-11-10T12:15:51,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table991 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table991) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1251 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1493 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table750 2024-11-10T12:15:51,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table750) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table992 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table992) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1010 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1010) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1252 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1494 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table751 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table751) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table993 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table993) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1011 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1011) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1253 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1495 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table510 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table510) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table752 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table752) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table994 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table994) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1012 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1012) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1254 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1496 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table511 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table511) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table753 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table753) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table995 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table995) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1013 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1013) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1255 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1497 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table512 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table512) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table754 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table754) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table996 2024-11-10T12:15:51,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table996) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table513 2024-11-10T12:15:51,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table513) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table755 2024-11-10T12:15:51,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table755) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table997 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table997) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table514 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table514) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table756 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table756) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table998 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table998) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table515 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table515) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table757 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table757) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table999 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table999) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table516 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table516) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table758 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table758) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table517 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table517) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table759 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table759) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table518 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table518) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table519 2024-11-10T12:15:51,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table519) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-10T12:15:51,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1014 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1014) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1256 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1498 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1015 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1015) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1257 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1499 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1016 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1016) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1258 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1017 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1017) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1259 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-10T12:15:51,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1018 2024-11-10T12:15:51,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1018) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1019 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1019) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-10T12:15:51,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table780 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table780) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1480 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table781 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table781) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1481 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table540 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table540) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table782 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table782) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1240 2024-11-10T12:15:51,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1482 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table541 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table541) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table783 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table783) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1241 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1483 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table300 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table542 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table542) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table784 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table784) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1000 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1000) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1242 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1484 2024-11-10T12:15:51,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table301 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table543 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table543) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table785 2024-11-10T12:15:51,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table785) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1001 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1001) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1243 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1485 2024-11-10T12:15:51,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table302 2024-11-10T12:15:51,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table544 2024-11-10T12:15:51,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table544) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table786 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table786) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1002 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1002) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1244 2024-11-10T12:15:51,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1486 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table303 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table545 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table545) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table787 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table787) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table304 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table546 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table546) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table788 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table788) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table305 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table547 2024-11-10T12:15:51,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table547) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table789 2024-11-10T12:15:51,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table789) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table306 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table548 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table548) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table307 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table549 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table549) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table308 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table309 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1003 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1003) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1245 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1487 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1004 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1004) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1246 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1488 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1005 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1005) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1247 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1489 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:51,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:51,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1006 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1006) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1248 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1007 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1007) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1249 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1008 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1008) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1009 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1009) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-10T12:15:52,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-10T12:15:52,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table770 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table770) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1470 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table771 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table771) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1471 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table530 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table530) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table772 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table772) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1230 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1472 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table531 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table531) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table773 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table773) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1231 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1473 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table532 2024-11-10T12:15:52,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table532) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table774 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table774) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1232 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1474 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table533 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table533) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table775 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table775) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1233 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1475 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table534 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table534) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table776 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table776) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table535 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table535) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table777 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table777) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table536 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table536) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table778 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table778) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table537 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table537) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table779 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table779) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table538 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table538) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table539 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table539) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1234 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1476 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1235 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1477 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1236 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1478 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1237 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1479 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-10T12:15:52,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1238 2024-11-10T12:15:52,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-10T12:15:52,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1239 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1061 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1061) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1062 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1062) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1063 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1063) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1064 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1064) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1065 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1065) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table960 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table960) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1066 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1066) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table961 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table961) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1067 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1067) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table720 2024-11-10T12:15:52,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table720) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table962 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table962) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1068 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1068) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table721 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table721) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table963 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table963) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table722 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table722) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table964 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table964) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table723 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table723) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table965 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table965) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table724 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table724) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table966 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table966) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table725 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table725) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table967 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table967) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table726 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table726) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table968 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table968) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table727 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table727) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table969 2024-11-10T12:15:52,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table969) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table728 2024-11-10T12:15:52,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table728) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1060 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1060) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table729 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table729) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1069 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1069) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1050 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1050) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1292 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1051 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1051) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1293 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1052 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1052) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1294 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1053 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1053) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1295 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1054 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1054) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1296 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1055 2024-11-10T12:15:52,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1055) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1297 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table950 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table950) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1056 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1056) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1298 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table951 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table951) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1057 2024-11-10T12:15:52,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1057) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1299 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table710 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table710) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table952 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table952) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table711 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table711) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table953 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table953) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table712 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table712) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table954 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table954) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table713 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table713) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table955 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table955) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table714 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table714) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table956 2024-11-10T12:15:52,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table956) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table715 2024-11-10T12:15:52,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table715) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table957 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table957) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table716 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table716) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table958 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table958) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1290 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table717 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table717) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table959 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table959) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1291 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table718 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table718) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table719 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table719) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1058 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1058) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1059 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1059) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1281 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1040 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1040) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1282 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1041 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1041) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1283 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table980 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table980) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1042 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1042) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1284 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table981 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table981) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1043 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1043) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1285 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table740 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table740) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table982 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table982) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1044 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1044) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1286 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table741 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table741) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table983 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table983) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1045 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1045) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1287 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table500 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table500) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table742 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table742) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table984 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table984) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1046 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1046) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1288 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table501 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table501) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table743 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table743) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table985 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table985) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table502 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table502) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table744 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table744) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table986 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table986) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table503 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table503) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table745 2024-11-10T12:15:52,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table745) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,052 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table987 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table987) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,052 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table504 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table504) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table746 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table746) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table988 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table988) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table505 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table505) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table747 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table747) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table989 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table989) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table506 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table506) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table748 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table748) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table507 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table507) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table749 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table749) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table508 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table508) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1280 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table509 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table509) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1047 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1047) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1289 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1048 2024-11-10T12:15:52,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1048) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1049 2024-11-10T12:15:52,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1049) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1270 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1271 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1030 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1030) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1272 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1031 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1031) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1273 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table970 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table970) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1032 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1032) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1274 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table971 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table971) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1033 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1033) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1275 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table730 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table730) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table972 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table972) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1034 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1034) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1276 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table731 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table731) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table973 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table973) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1035 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1035) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1277 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table732 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table732) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table974 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table974) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table733 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table733) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table975 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table975) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table734 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table734) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table976 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table976) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table735 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table735) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table977 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table977) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table736 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table736) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table978 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table978) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table737 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table737) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table979 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table979) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table738 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table738) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table739 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table739) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1036 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1036) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1278 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1037 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1037) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1279 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1038 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1038) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1039 2024-11-10T12:15:52,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1039) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table361 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-10T12:15:52,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table362 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table363 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table364 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table365 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table366 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table367 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table368 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1420 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table369 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1429 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1421 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1422 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1423 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1424 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1425 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1426 2024-11-10T12:15:52,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table370 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1427 2024-11-10T12:15:52,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table371 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1428 2024-11-10T12:15:52,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table350 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table592 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table592) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table351 2024-11-10T12:15:52,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table593 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table593) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table352 2024-11-10T12:15:52,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table594 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table594) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table353 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table595 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table595) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table354 2024-11-10T12:15:52,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table596 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table596) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table355 2024-11-10T12:15:52,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table597 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table597) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table356 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table598 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table598) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table357 2024-11-10T12:15:52,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table599 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table599) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table358 2024-11-10T12:15:52,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table359 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1418 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1419 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1410 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1411 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1412 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1413 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1414 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1415 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1416 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table360 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1417 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table141 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table383 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table142 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table384 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table143 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table385 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table144 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table386 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table145 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table387 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table146 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table388 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table147 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table389 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table148 2024-11-10T12:15:52,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table149 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1407 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1408 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1409 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1400 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1401 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1402 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table390 2024-11-10T12:15:52,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1403 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table391 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1404 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table150 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table392 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1405 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table151 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table393 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1406 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table372 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-10T12:15:52,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table373 2024-11-10T12:15:52,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-10T12:15:52,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table374 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table375 2024-11-10T12:15:52,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table376 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table377 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table378 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table379 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table380 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table381 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table140 2024-11-10T12:15:52,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table382 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-10T12:15:52,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table560 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table560) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table561 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table561) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table320 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table562 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table562) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1460 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table321 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table563 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table563) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1461 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table322 2024-11-10T12:15:52,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table564 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table564) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1220 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1462 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table323 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table565 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table565) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1221 2024-11-10T12:15:52,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1463 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table324 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table566 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table566) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1222 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1464 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table325 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table567 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table567) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table326 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table568 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table568) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table327 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table569 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table569) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table328 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table329 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1223 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1465 2024-11-10T12:15:52,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-10T12:15:52,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1224 2024-11-10T12:15:52,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1466 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1225 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1467 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1226 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1468 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1227 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1469 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1228 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1229 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table790 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table790) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table791 2024-11-10T12:15:52,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table791) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table550 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table550) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table792 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table792) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table551 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table551) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table793 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table793) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table310 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table552 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table552) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table794 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table794) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1450 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table311 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table553 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table553) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table795 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table795) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1451 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table312 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table554 2024-11-10T12:15:52,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table554) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table796 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table796) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1210 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1452 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table313 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table555 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table555) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table797 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table797) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1211 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1453 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table314 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table556 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table556) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table798 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table798) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table315 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table557 2024-11-10T12:15:52,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table557) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table799 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table799) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table316 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table558 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table558) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table317 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table559 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table559) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table318 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table319 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1212 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1454 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1213 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1455 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1214 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1456 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1215 2024-11-10T12:15:52,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1457 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1216 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1458 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1217 2024-11-10T12:15:52,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1459 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1218 2024-11-10T12:15:52,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1219 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-10T12:15:52,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table581 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table581) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table340 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table582 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table582) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table341 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table583 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table583) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-10T12:15:52,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table342 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table584 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table584) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-10T12:15:52,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table343 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table585 2024-11-10T12:15:52,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table585) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table344 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table586 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table586) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1440 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
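The BalancerClusterState lines repeated for every table describe the same six-server topology: each srvNNN name is assigned a host index, and because only one rack ("rack") exists, every server lands on rack 0, which is why each record ends with "Number of tables=1, number of hosts=6, number of racks=1". The sketch below is a hypothetical reconstruction of that bookkeeping using the indices from the log; it is not the real BalancerClusterState class.

// Sketch only: rebuilds the server -> host/rack index maps that the
// BalancerClusterState(202/303/314) records above report. Class, field and
// method names are hypothetical; only the logged indices come from the log.
import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterStateSketch {
    public static void main(String[] args) {
        // Ordered so that array position matches the host index in the log:
        // {srv1119392703=0, srv1148487265=1, srv2125714325=2,
        //  srv254482127=3, srv493596571=4, srv891047081=5}
        String[] servers = {
            "srv1119392703", "srv1148487265", "srv2125714325",
            "srv254482127", "srv493596571", "srv891047081"
        };

        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        Map<String, Integer> rackIndex = new LinkedHashMap<>();
        for (String server : servers) {
            // One host per server in this test cluster; a single rack "rack".
            hostIndex.put(server, hostIndex.size());
            rackIndex.put(server, 0);
        }

        for (String server : servers) {
            int host = hostIndex.get(server);
            System.out.printf("server %d is on host %d, rack %d%n",
                host, host, rackIndex.get(server));
        }
        System.out.printf("number of hosts=%d, number of racks=%d%n",
            hostIndex.size(), 1);
    }
}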
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table345 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table587 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table587) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1441 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table346 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table588 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table588) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1200 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1442 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table347 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table589 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table589) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table348 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table349 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1209 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1201 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1443 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1202 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1444 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1203 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1445 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1204 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1446 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1205 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1447 2024-11-10T12:15:52,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1206 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1448 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table590 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table590) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1207 2024-11-10T12:15:52,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1449 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table591 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table591) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1208 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table570 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table570) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table571 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table571) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table330 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table572 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table572) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table331 2024-11-10T12:15:52,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table573 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table573) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table332 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table574 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table574) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table333 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table575 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table575) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table334 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table576 2024-11-10T12:15:52,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table576) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1430 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table335 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table577 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table577) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1431 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table336 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table578 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table578) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table337 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table579 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table579) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table338 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table339 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1432 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1433 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1434 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1435 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1436 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1437 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1438 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table580 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table580) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1439 2024-11-10T12:15:52,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table284 2024-11-10T12:15:52,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table285 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table286 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table287 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table288 2024-11-10T12:15:52,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table289 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table290 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table291 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table292 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table293 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table294 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table273 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table274 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table275 2024-11-10T12:15:52,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table276 2024-11-10T12:15:52,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table277 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table278 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table279 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table280 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table281 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table282 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table283 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table295 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table296 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table297 2024-11-10T12:15:52,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table298 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table299 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table920 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table920) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table921 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table921) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table922 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table922) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table923 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table923) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table924 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table924) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table925 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table925) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table926 2024-11-10T12:15:52,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table926) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table927 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table927) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table928 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table928) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table929 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table929) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1094 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1094) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1095 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1095) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1096 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1096) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1097 2024-11-10T12:15:52,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1097) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1098 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1098) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1099 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1099) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table910 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table910) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table911 2024-11-10T12:15:52,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table911) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table912 2024-11-10T12:15:52,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table912) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1090 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1090) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table913 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table913) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1091 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1091) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table914 2024-11-10T12:15:52,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table914) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1092 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1092) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table915 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table915) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1093 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1093) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table916 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table916) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table917 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table917) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table918 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table918) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table919 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table919) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1083 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1083) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1084 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1084) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1085 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1085) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1086 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1086) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1087 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1087) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1088 2024-11-10T12:15:52,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1088) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1089 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1089) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table940 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table940) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table941 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table941) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table700 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table700) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table942 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table942) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table701 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table701) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table943 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table943) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table702 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table702) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table944 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table944) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table703 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table703) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table945 2024-11-10T12:15:52,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table945) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table704 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table704) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table946 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table946) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1080 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1080) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table705 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table705) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table947 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table947) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1081 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1081) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table706 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table706) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table948 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table948) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1082 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1082) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table707 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table707) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table949 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table949) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table708 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table708) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table709 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table709) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1072 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1072) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1073 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1073) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1074 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1074) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1075 2024-11-10T12:15:52,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1075) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1076 2024-11-10T12:15:52,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1076) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1077 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1077) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1078 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1078) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1079 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1079) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table930 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table930) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table931 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table931) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table932 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table932) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table933 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table933) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table934 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table934) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table935 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table935) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table936 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table936) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1070 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1070) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table937 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table937) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1071 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1071) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table938 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table938) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table939 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table939) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table900 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table900) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table901 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table901) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table902 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table902) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table903 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table903) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table904 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table904) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table905 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table905) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table906 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table906) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table907 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table907) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table908 2024-11-10T12:15:52,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table908) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table909 2024-11-10T12:15:52,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1119392703=0, srv254482127=3, srv891047081=5, srv1148487265=1, srv493596571=4, srv2125714325=2} racks are {rack=0} 2024-11-10T12:15:52,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table909) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1380 2024-11-10T12:15:52,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1381 2024-11-10T12:15:52,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table880 2024-11-10T12:15:52,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table880) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1140 2024-11-10T12:15:52,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1382 2024-11-10T12:15:52,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table881 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table881) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1141 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1383 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table640 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table640) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table882 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table882) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1142 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1384 2024-11-10T12:15:52,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table641 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table641) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table883 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table883) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1143 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1385 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table400 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table642 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table642) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table884 2024-11-10T12:15:52,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table884) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1144 2024-11-10T12:15:52,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1386 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table401 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table643 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table643) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table885 2024-11-10T12:15:52,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table885) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1145 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1387 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table402 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table644 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table644) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table886 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table886) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table403 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table645 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table645) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table887 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table887) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table404 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table646 2024-11-10T12:15:52,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table646) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table888 2024-11-10T12:15:52,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table888) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table405 2024-11-10T12:15:52,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table647 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table647) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table889 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table889) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table406 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table648 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table648) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table407 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table649 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table649) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table408 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table409 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1146 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1388 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1147 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1389 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1148 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1149 2024-11-10T12:15:52,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1370 2024-11-10T12:15:52,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1371 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table870 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table870) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1130 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1372 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table871 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table871) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1131 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1373 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table630 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table630) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table872 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table872) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1132 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1374 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table631 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table631) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table873 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table873) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1133 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1375 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table632 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table632) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table874 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table874) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1134 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1376 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table633 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table633) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table875 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table875) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table634 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table634) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table876 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table876) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table635 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table635) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table877 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table877) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table636 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table636) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table878 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table878) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table637 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table637) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table879 2024-11-10T12:15:52,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table879) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table638 2024-11-10T12:15:52,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table638) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table639 2024-11-10T12:15:52,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table639) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1135 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1377 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1136 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1378 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1137 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1379 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1138 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1139 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table660 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table660) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1360 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table661 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table661) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1361 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table420 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table662 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table662) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1120 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1362 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table421 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table663 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table663) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1121 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1363 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table422 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table664 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table664) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1122 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1364 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table423 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table665 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table665) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1123 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1365 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table424 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table666 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table666) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table425 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table667 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table667) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table426 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table668 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table668) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table427 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table669 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table669) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table428 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table429 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1124 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1366 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1125 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1367 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1126 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1368 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1127 2024-11-10T12:15:52,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1369 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1128 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1129 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table890 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table890) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table891 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table891) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table650 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table650) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table892 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table892) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1350 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table651 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table651) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table893 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table893) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1351 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table410 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table652 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table652) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table894 2024-11-10T12:15:52,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table894) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1110 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1352 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table411 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table653 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table653) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table895 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table895) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1111 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1353 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table412 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table654 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table654) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table896 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table896) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1112 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1354 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table413 2024-11-10T12:15:52,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table655 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table655) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table897 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table897) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table414 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table656 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table656) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table898 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table898) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table415 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table657 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table657) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table899 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table899) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table416 2024-11-10T12:15:52,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table658 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table658) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table417 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table659 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table659) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table418 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table419 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1113 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1355 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1114 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1356 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1115 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1357 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1116 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1358 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1117 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1359 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1118 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1119 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1182 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1183 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1184 2024-11-10T12:15:52,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1185 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1186 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1187 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table840 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table840) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1188 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table841 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table841) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1189 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table600 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table600) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table842 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table842) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table601 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table601) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table843 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table843) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table602 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table602) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table844 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table844) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table603 2024-11-10T12:15:52,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table603) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table845 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table845) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table604 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table604) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table846 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table846) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table605 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table605) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table847 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table847) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table606 2024-11-10T12:15:52,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table606) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table848 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table848) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1180 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table607 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table607) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table849 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table849) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1181 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table608 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table608) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table609 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table609) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1171 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1172 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1173 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1174 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1175 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1176 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1177 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table830 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table830) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1178 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table831 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table831) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table832 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table832) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table833 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table833) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table834 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table834) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table835 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table835) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table836 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table836) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table837 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table837) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table838 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table838) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1170 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table839 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table839) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1179 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1160 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1161 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1162 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1163 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table860 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table860) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1164 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table861 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table861) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1165 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table620 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table620) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table862 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table862) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1166 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table621 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table621) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table863 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table863) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1167 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table622 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table622) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table864 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table864) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table623 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table623) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table865 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table865) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table624 2024-11-10T12:15:52,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table624) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table866 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table866) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table625 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table625) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table867 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table867) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table626 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table626) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table868 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table868) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table627 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table627) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table869 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table869) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table628 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table628) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table629 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table629) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1168 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1169 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1391 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1150 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1392 2024-11-10T12:15:52,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1151 2024-11-10T12:15:52,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1393 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1152 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1394 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1153 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1395 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table850 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table850) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1154 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1396 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table851 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table851) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1155 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1397 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table610 2024-11-10T12:15:52,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table610) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table852 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table852) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1156 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1398 2024-11-10T12:15:52,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table611 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table611) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table853 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table853) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table612 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table612) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table854 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table854) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table613 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table613) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table855 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table855) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table614 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table614) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table856 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table856) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table615 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table615) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table857 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table857) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table616 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table616) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table858 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table858) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table617 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table617) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table859 2024-11-10T12:15:52,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table859) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table618 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table618) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1390 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table619 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table619) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1157 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1399 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1158 2024-11-10T12:15:52,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1159 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table240 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table482 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table241 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table483 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table242 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table484 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table243 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table485 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table244 2024-11-10T12:15:52,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table486 2024-11-10T12:15:52,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table245 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table487 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table246 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table488 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table247 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table489 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table248 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table249 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1308 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1309 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1300 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1301 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1302 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1303 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1304 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table490 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1305 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table491 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1306 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table250 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table492 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1307 2024-11-10T12:15:52,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table471 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table230 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table472 2024-11-10T12:15:52,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table231 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table473 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table232 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table474 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table233 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table475 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table234 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table476 2024-11-10T12:15:52,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table235 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table477 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table236 2024-11-10T12:15:52,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table478 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table237 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table479 2024-11-10T12:15:52,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table238 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table239 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table480 2024-11-10T12:15:52,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table481 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table262 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table263 2024-11-10T12:15:52,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table264 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table265 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table266 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table267 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table268 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table269 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table270 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table271 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table272 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table251 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table493 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table252 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table494 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table253 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table495 2024-11-10T12:15:52,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table254 2024-11-10T12:15:52,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table496 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table255 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table497 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table256 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table498 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table257 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table499 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table258 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table259 2024-11-10T12:15:52,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,372 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table260 2024-11-10T12:15:52,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table261 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table680 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table680) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table681 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table681) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table440 2024-11-10T12:15:52,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table682 2024-11-10T12:15:52,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table682) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table441 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table683 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table683) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table200 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table442 2024-11-10T12:15:52,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table684 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table684) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1340 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table201 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table443 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table685 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table685) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1341 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table202 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table444 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table686 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table686) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1100 2024-11-10T12:15:52,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1342 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table203 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table445 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table687 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table687) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1101 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1343 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table204 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table446 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table688 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table688) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table205 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table447 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table689 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table689) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table206 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table448 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table207 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table449 2024-11-10T12:15:52,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table208 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table209 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1102 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1344 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1103 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1345 2024-11-10T12:15:52,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1104 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1346 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1105 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1347 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1106 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1348 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1107 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1349 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1108 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table690 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table690) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1109 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table670 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table670) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table671 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table671) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table430 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table672 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table672) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table431 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table673 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table673) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table432 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table674 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table674) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1330 2024-11-10T12:15:52,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table433 2024-11-10T12:15:52,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table675 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table675) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1331 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table434 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table676 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table676) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1332 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table435 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table677 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table677) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table436 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table678 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table678) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table437 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table679 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table679) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table438 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table439 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1333 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1334 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1335 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1336 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1337 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1338 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1339 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table460 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table461 2024-11-10T12:15:52,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table220 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table462 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table221 2024-11-10T12:15:52,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table463 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table222 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table464 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table223 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table465 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table224 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table466 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1320 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table225 2024-11-10T12:15:52,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table467 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1321 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table226 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table468 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table227 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table469 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table228 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table229 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1322 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1323 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1324 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1325 2024-11-10T12:15:52,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1326 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1327 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1328 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table470 2024-11-10T12:15:52,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1329 2024-11-10T12:15:52,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table691 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table691) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table450 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table692 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table692) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table451 2024-11-10T12:15:52,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table693 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table693) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table210 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table452 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table694 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table694) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table211 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table453 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table695 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table695) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table212 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table454 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table696 2024-11-10T12:15:52,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table696) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table213 2024-11-10T12:15:52,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table455 2024-11-10T12:15:52,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table697 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table697) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table214 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table456 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table698 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table698) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1310 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table215 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table457 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table699 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table699) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table216 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table458 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table217 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table459 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table218 2024-11-10T12:15:52,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table219 2024-11-10T12:15:52,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1319 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1311 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1312 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1313 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1314 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1315 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1316 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1317 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1318 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table196 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table197 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table198 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table199 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table163 2024-11-10T12:15:52,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table164 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table165 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table166 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table167 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table168 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table169 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table170 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table171 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table172 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table173 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table152 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table394 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table153 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table395 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table154 2024-11-10T12:15:52,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table396 2024-11-10T12:15:52,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table155 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table397 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table156 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table398 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table157 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table399 2024-11-10T12:15:52,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table158 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table159 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table160 2024-11-10T12:15:52,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table161 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table162 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table185 2024-11-10T12:15:52,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table186 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table187 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table188 2024-11-10T12:15:52,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table189 2024-11-10T12:15:52,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table190 2024-11-10T12:15:52,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table191 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table192 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table193 2024-11-10T12:15:52,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table194 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table195 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table174 2024-11-10T12:15:52,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table175 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table176 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table177 2024-11-10T12:15:52,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table178 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table179 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table180 2024-11-10T12:15:52,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table181 2024-11-10T12:15:52,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table182 2024-11-10T12:15:52,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table183 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table184 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table800 2024-11-10T12:15:52,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table800) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table801 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table801) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table802 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table802) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table803 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table803) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table804 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table804) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table805 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table805) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table806 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table806) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table807 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table807) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table808 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table808) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table809 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table809) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table820 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table820) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table821 2024-11-10T12:15:52,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table821) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table822 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table822) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table823 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table823) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table824 2024-11-10T12:15:52,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table824) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table825 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table825) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table826 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table826) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table827 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table827) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table828 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table828) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table829 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table829) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1193 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1194 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1195 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1196 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1197 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1198 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1199 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table810 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table810) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table811 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table811) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table812 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table812) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table813 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table813) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table814 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table814) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1190 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table815 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table815) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1191 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table816 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table816) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1192 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table817 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table817) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table818 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table818) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table819 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table819) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1260 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1261 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table760 2024-11-10T12:15:52,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table760) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1020 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1020) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1262 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table761 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table761) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1021 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1021) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1263 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table520 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table520) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table762 2024-11-10T12:15:52,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table762) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1022 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1022) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1264 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table521 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table521) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table763 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table763) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1023 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1023) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1265 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table522 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table522) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table764 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table764) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1024 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1024) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1266 2024-11-10T12:15:52,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table523 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table523) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table765 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table765) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table524 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table524) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table766 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table766) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table525 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table525) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table767 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table767) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table526 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table526) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table768 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table768) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table527 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table527) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table769 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table769) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table528 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table528) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table529 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table529) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1025 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1025) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1267 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1026 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1026) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1268 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1027 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1027) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1269 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1028 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1028) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,469 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1029 2024-11-10T12:15:52,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,469 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1029) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-10T12:15:52,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1490 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1491 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table990 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table990) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1250 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1492 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table991 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table991) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1251 2024-11-10T12:15:52,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1493 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table750 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table750) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table992 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table992) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1010 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1010) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1252 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1494 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table751 2024-11-10T12:15:52,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table751) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table993 2024-11-10T12:15:52,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table993) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1011 2024-11-10T12:15:52,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1011) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1253 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1495 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table510 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table510) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table752 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table752) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table994 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table994) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1012 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1012) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1254 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1496 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table511 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table511) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table753 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table753) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table995 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table995) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1013 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1013) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1255 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1497 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table512 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table512) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table754 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table754) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table996 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table996) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table513 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table513) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table755 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table755) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table997 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table997) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table514 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table514) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table756 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table756) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table998 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table998) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table515 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table515) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table757 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table757) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table999 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table999) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table516 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table516) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table758 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table758) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table517 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table517) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table759 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table759) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table518 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table518) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table519 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table519) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-10T12:15:52,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1014 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1014) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1256 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1498 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1015 2024-11-10T12:15:52,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1015) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1257 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1499 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1016 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1016) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1258 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1017 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1017) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1259 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1018 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1018) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1019 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1019) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table780 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table780) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1480 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table781 2024-11-10T12:15:52,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table781) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1481 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table540 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table540) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table782 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table782) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1240 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1482 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table541 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table541) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table783 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table783) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1241 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1483 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table300 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table542 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table542) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table784 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table784) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1000 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1000) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1242 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1484 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table301 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table543 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table543) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table785 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table785) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1001 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1001) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1243 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1485 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table302 2024-11-10T12:15:52,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table544 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table544) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table786 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table786) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1002 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1002) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1244 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1486 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table303 2024-11-10T12:15:52,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table545 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table545) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table787 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table787) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table304 2024-11-10T12:15:52,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table546 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table546) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table788 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table788) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table305 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table547 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table547) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table789 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table789) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table306 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table548 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table548) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table307 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table549 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table549) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table308 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table309 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1003 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1003) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1245 2024-11-10T12:15:52,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1487 2024-11-10T12:15:52,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1004 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1004) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1246 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1488 2024-11-10T12:15:52,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1005 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1005) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1247 2024-11-10T12:15:52,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1489 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1006 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1006) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1248 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1007 2024-11-10T12:15:52,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1007) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1249 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1008 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1008) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1009 2024-11-10T12:15:52,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1009) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table770 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table770) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1470 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table771 2024-11-10T12:15:52,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table771) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1471 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table530 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table530) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table772 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table772) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1230 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1472 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table531 2024-11-10T12:15:52,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table531) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table773 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table773) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1231 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1473 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table532 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table532) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table774 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table774) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1232 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1474 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table533 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table533) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table775 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table775) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1233 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1475 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table534 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table534) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table776 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table776) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table535 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table535) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table777 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table777) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table536 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table536) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table778 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table778) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table537 2024-11-10T12:15:52,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table537) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table779 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table779) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table538 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table538) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table539 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table539) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1234 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1476 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1235 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1477 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1236 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1478 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-10T12:15:52,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1237 2024-11-10T12:15:52,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1479 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1238 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1239 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1061 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1061) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,533 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1062 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1062) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1063 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1063) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1064 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1064) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,535 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1065 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1065) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,535 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table960 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table960) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,535 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1066 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1066) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table961 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table961) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1067 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1067) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,536 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table720 2024-11-10T12:15:52,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table720) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table962 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table962) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1068 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1068) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table721 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table721) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table963 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table963) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table722 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table722) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table964 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table964) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table723 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table723) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table965 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table965) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table724 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table724) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table966 2024-11-10T12:15:52,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table966) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table725 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table725) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table967 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table967) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table726 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table726) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table968 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table968) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table727 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table727) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table969 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table969) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table728 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table728) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1060 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1060) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table729 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table729) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1069 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1069) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1050 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1050) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1292 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1051 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1051) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1293 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1052 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1052) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1294 2024-11-10T12:15:52,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1053 2024-11-10T12:15:52,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1053) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1295 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1054 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1054) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1296 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1055 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1055) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1297 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table950 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table950) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1056 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1056) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1298 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table951 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table951) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1057 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1057) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1299 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table710 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table710) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table952 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table952) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table711 2024-11-10T12:15:52,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table711) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table953 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table953) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table712 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table712) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table954 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table954) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table713 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table713) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table955 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table955) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table714 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table714) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table956 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table956) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table715 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table715) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table957 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table957) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table716 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table716) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table958 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table958) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1290 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table717 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table717) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table959 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table959) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1291 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table718 2024-11-10T12:15:52,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table718) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table719 2024-11-10T12:15:52,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table719) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1058 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1058) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1059 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1059) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1281 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1040 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1040) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1282 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1041 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1041) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,557 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1283 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table980 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table980) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1042 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1042) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1284 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table981 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table981) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1043 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1043) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1285 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table740 2024-11-10T12:15:52,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table740) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table982 2024-11-10T12:15:52,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table982) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1044 2024-11-10T12:15:52,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1044) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1286 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table741 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table741) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table983 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table983) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1045 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1045) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1287 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table500 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table500) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table742 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table742) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table984 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table984) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1046 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1046) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1288 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table501 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table501) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table743 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table743) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table985 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table985) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table502 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table502) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table744 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table744) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table986 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table986) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table503 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table503) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table745 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table745) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table987 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table987) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table504 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table504) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table746 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table746) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table988 2024-11-10T12:15:52,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table988) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table505 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table505) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table747 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table747) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,568 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table989 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table989) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table506 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table506) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table748 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table748) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table507 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table507) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table749 2024-11-10T12:15:52,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table749) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table508 2024-11-10T12:15:52,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table508) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1280 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table509 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table509) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1047 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1047) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1289 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1048 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1048) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1049 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1049) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1270 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1271 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1030 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1030) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1272 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1031 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1031) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1273 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table970 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table970) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1032 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1032) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1274 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table971 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table971) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1033 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1033) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1275 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table730 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,576 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table730) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table972 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table972) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1034 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1034) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1276 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table731 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table731) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table973 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table973) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1035 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1035) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1277 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table732 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table732) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table974 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table974) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table733 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,579 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,580 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table733) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,580 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table975 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,580 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table975) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,580 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table734 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,580 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table734) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,580 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table976 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table976) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table735 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table735) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table977 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table977) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table736 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table736) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table978 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table978) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table737 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table737) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table979 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table979) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table738 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table738) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table739 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table739) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1036 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1036) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1278 2024-11-10T12:15:52,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1037 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1037) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1279 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,584 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1038 2024-11-10T12:15:52,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,585 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1038) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,585 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1039 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,585 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1039) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,585 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table361 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,586 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-10T12:15:52,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table362 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,587 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,587 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,588 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,588 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table363 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,588 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,588 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,589 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,589 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,589 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,589 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table364 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,589 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,589 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,590 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,590 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,590 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,590 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table365 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,591 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,591 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,591 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-10T12:15:52,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,591 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,591 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,591 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-10T12:15:52,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,592 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,592 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table366 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,592 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,592 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,592 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,592 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-10T12:15:52,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,593 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,593 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,593 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table367 2024-11-10T12:15:52,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,593 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,593 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,593 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-10T12:15:52,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table368 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1420 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,594 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table369 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1429 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1421 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,597 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,597 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,597 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1422 2024-11-10T12:15:52,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,597 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,597 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,597 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1423 2024-11-10T12:15:52,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,597 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,597 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,597 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1424 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1425 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,598 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,598 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1426 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table370 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,599 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,599 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1427 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table371 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,600 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1428 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table350 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table592 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,601 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,602 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table592) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,602 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table351 2024-11-10T12:15:52,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,602 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,602 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,602 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table593 2024-11-10T12:15:52,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,602 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,602 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table593) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,602 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-10T12:15:52,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table352 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table594 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table594) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table353 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table595 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table595) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table354 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table596 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table596) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table355 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table597 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table597) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table356 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table598 2024-11-10T12:15:52,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,608 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,608 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table598) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,608 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-10T12:15:52,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,608 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,608 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,608 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table357 2024-11-10T12:15:52,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table599 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table599) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,609 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table358 2024-11-10T12:15:52,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table359 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,610 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-10T12:15:52,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1418 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,611 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,611 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1419 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1410 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1411 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,612 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1412 2024-11-10T12:15:52,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1413 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1414 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,613 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1415 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1416 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table360 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,614 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1417 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table141 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table383 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,615 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table142 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table384 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table143 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table385 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table144 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table386 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table145 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table387 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table146 2024-11-10T12:15:52,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table388 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table147 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table389 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table148 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table149 2024-11-10T12:15:52,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1407 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:52,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1408 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1409 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:52,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1400 2024-11-10T12:15:52,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1401 2024-11-10T12:15:52,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1402 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table390 2024-11-10T12:15:52,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1403 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table391 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1404 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table150 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table392 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1405 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:52,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table151 2024-11-10T12:15:52,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table393 2024-11-10T12:15:52,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1406 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table372 2024-11-10T12:15:52,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table373 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table374 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table375 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table376 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table377 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-10T12:15:52,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table378 2024-11-10T12:15:52,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table379 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table380 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table381 2024-11-10T12:15:52,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table140 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table382 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table560 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table560) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table561 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table561) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table320 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table562 2024-11-10T12:15:52,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table562) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1460 2024-11-10T12:15:52,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table321 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table563 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table563) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1461 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table322 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table564 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table564) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1220 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1462 2024-11-10T12:15:52,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table323 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table565 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table565) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1221 2024-11-10T12:15:52,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1463 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table324 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table566 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table566) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1222 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1464 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table325 2024-11-10T12:15:52,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table567 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table567) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table326 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table568 2024-11-10T12:15:52,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table568) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table327 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table569 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table569) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table328 2024-11-10T12:15:52,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table329 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1223 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1465 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1224 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1466 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1225 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1467 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1226 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1468 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1227 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1469 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1228 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1229 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table790 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table790) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table791 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table791) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table550 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table550) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table792 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table792) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table551 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table551) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table793 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table793) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table310 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table552 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table552) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table794 2024-11-10T12:15:52,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table794) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1450 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table311 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table553 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table553) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table795 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table795) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1451 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table312 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table554 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table554) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table796 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table796) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1210 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1452 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table313 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table555 2024-11-10T12:15:52,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table555) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table797 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table797) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1211 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1453 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table314 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table556 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table556) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table798 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table798) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table315 2024-11-10T12:15:52,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table557 2024-11-10T12:15:52,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table557) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table799 2024-11-10T12:15:52,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table799) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table316 2024-11-10T12:15:52,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table558 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table558) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table317 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table559 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table559) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table318 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table319 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1212 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1454 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1213 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1455 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1214 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1456 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-10T12:15:52,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1215 2024-11-10T12:15:52,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1457 2024-11-10T12:15:52,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-10T12:15:52,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1216 2024-11-10T12:15:52,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1458 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1217 2024-11-10T12:15:52,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1459 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1218 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1219 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table581 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table581) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table340 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table582 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table582) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table341 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table583 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table583) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table342 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table584 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table584) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table343 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table585 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table585) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table344 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table586 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table586) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1440 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-10T12:15:52,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table345 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table587 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table587) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1441 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table346 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table588 2024-11-10T12:15:52,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table588) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1200 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1442 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table347 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table589 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table589) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table348 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table349 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1209 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1201 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1443 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1202 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1444 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1203 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1445 2024-11-10T12:15:52,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1204 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1446 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1205 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1447 2024-11-10T12:15:52,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1206 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1448 2024-11-10T12:15:52,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table590 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table590) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1207 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1449 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table591 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table591) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1208 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table570 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table570) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table571 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table571) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-10T12:15:52,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table330 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table572 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table572) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-10T12:15:52,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table331 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table573 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table573) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-10T12:15:52,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table332 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table574 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table574) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-10T12:15:52,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table333 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table575 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table575) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table334 2024-11-10T12:15:52,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table576 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table576) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1430 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table335 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table577 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table577) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1431 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table336 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table578 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table578) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table337 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table579 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table579) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table338 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table339 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1432 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1433 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1434 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1435 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1436 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1437 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1438 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table580 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table580) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1439 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table284 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table285 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table286 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table287 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table288 2024-11-10T12:15:52,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table289 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table290 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table291 2024-11-10T12:15:52,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table292 2024-11-10T12:15:52,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table293 2024-11-10T12:15:52,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table294 2024-11-10T12:15:52,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table273 2024-11-10T12:15:52,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table274 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table275 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table276 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table277 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table278 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table279 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table280 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table281 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table282 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table283 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table295 2024-11-10T12:15:52,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table296 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table297 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table298 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table299 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table920 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table920) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table921 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table921) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table922 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table922) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table923 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table923) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table924 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table924) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table925 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table925) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table926 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table926) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table927 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table927) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table928 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table928) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table929 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table929) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1094 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1094) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1095 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1095) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1096 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1096) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1097 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1097) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1098 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1098) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1099 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1099) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table910 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table910) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table911 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table911) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table912 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table912) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1090 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1090) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table913 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table913) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1091 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1091) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table914 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table914) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1092 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1092) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table915 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table915) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1093 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1093) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table916 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table916) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table917 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table917) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table918 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table918) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table919 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table919) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1083 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1083) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
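Editor's note: the tuning hint in these messages names hbase.master.balancer.stochastic.minCostNeedBalance directly; the second property in the sketch below is an assumption about the multiplier key for RegionCountSkewCostFunction and should be verified against the HBase version in use. A minimal, hedged sketch of applying that advice programmatically:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    // Illustrative only: builds a Configuration reflecting the log's tuning advice.
    public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Lower the skip threshold named in the log so smaller imbalances still trigger a balance plan.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Assumed property name for the RegionCountSkewCostFunction multiplier; confirm before relying on it.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        return conf;
    }
}

Either change makes the balancer more aggressive: lowering the threshold lets smaller weighted-average imbalances pass the check, while raising a multiplier gives that cost function more weight in the average.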
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1084 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1084) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1085 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1085) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1086 2024-11-10T12:15:52,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1086) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1087 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1087) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1088 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1088) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1089 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1089) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table940 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table940) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table941 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table941) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table700 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table700) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table942 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table942) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table701 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table701) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table943 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table943) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table702 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table702) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table944 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table944) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table703 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table703) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table945 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table945) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table704 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table704) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table946 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table946) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1080 2024-11-10T12:15:52,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1080) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table705 2024-11-10T12:15:52,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table705) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table947 2024-11-10T12:15:52,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table947) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1081 2024-11-10T12:15:52,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1081) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table706 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table706) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table948 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table948) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1082 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1082) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table707 2024-11-10T12:15:52,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table707) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table949 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table949) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table708 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table708) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table709 2024-11-10T12:15:52,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table709) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1072 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1072) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1073 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1073) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1074 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1074) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1075 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1075) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1076 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1076) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1077 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1077) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1078 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1078) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1079 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1079) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table930 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table930) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table931 2024-11-10T12:15:52,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table931) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table932 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table932) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table933 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table933) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table934 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table934) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table935 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table935) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table936 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table936) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1070 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1070) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table937 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table937) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1071 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1071) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table938 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table938) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table939 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table939) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table900 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table900) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table901 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table901) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table902 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table902) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table903 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table903) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table904 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table904) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table905 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table905) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table906 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table906) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table907 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table907) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table908 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table908) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table909 2024-11-10T12:15:52,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1862415651=4, srv404653056=5, srv1058078852=0, srv1718033903=3, srv1213432687=1, srv151212247=2} racks are {rack=0} 2024-11-10T12:15:52,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-10T12:15:52,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table909) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-10T12:15:52,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1206728094=1, srv1245761626=2, srv2037623614=6, srv638135724=13, srv493070337=12, srv431101338=9, srv1074924595=0, srv2096944890=7, srv90418650=14, srv1982543325=5, srv32879627=8, srv466146356=11, srv1789984318=4, srv1780605078=3, srv462917789=10} racks are {rack=0} 2024-11-10T12:15:52,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:52,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:52,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:52,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:52,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:52,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 6 is on rack 0 2024-11-10T12:15:52,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1206728094=1, srv1245761626=2, srv2037623614=6, srv638135724=13, srv493070337=12, srv431101338=9, srv1074924595=0, srv2096944890=7, srv90418650=14, srv1982543325=5, srv32879627=8, srv466146356=11, srv1789984318=4, srv1780605078=3, srv462917789=10} racks are {rack=0} 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1206728094=1, srv1245761626=2, srv2037623614=6, srv638135724=13, srv493070337=12, srv431101338=9, srv1074924595=0, srv2096944890=7, srv90418650=14, srv1982543325=5, srv32879627=8, srv466146356=11, srv1789984318=4, srv1780605078=3, srv462917789=10} racks are {rack=0} 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:52,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:52,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:52,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:52,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:52,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 6 is on rack 0 2024-11-10T12:15:52,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:52,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:52,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:52,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:52,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:52,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-10T12:15:52,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:52,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1206728094=1, srv1245761626=2, srv2037623614=6, srv638135724=13, srv493070337=12, srv431101338=9, srv1074924595=0, srv2096944890=7, srv90418650=14, srv1982543325=5, srv32879627=8, srv466146356=11, srv1789984318=4, srv1780605078=3, srv462917789=10} racks are {rack=0} 2024-11-10T12:15:52,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,754 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:52,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:52,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:52,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:52,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:52,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:52,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:52,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:52,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:52,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:52,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-10T12:15:52,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:52,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1206728094=1, srv1245761626=2, srv2037623614=6, srv638135724=13, srv493070337=12, srv431101338=9, srv1074924595=0, srv2096944890=7, srv90418650=14, srv1982543325=5, srv32879627=8, srv466146356=11, srv1789984318=4, srv1780605078=3, srv462917789=10} racks are {rack=0} 2024-11-10T12:15:52,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:52,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:52,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:52,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:52,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:52,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 6 is on rack 0 2024-11-10T12:15:52,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:52,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:52,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:52,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:52,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:52,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-10T12:15:52,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:52,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1206728094=1, srv1245761626=2, srv2037623614=6, srv638135724=13, srv493070337=12, srv431101338=9, srv1074924595=0, srv2096944890=7, srv90418650=14, srv1982543325=5, srv32879627=8, srv466146356=11, srv1789984318=4, srv1780605078=3, srv462917789=10} racks are {rack=0} 2024-11-10T12:15:52,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,756 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:52,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:52,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:52,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:52,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:52,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:52,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:52,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:52,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:52,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:52,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-10T12:15:52,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:52,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1206728094=1, srv1245761626=2, srv2037623614=6, srv638135724=13, srv493070337=12, srv431101338=9, srv1074924595=0, srv2096944890=7, srv90418650=14, srv1982543325=5, srv32879627=8, srv466146356=11, srv1789984318=4, srv1780605078=3, srv462917789=10} racks are {rack=0} 2024-11-10T12:15:52,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:52,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:52,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:52,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:52,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:52,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 6 is on rack 0 2024-11-10T12:15:52,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:52,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:52,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:52,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:52,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:52,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-10T12:15:52,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:52,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1206728094=1, srv1245761626=2, srv2037623614=6, srv638135724=13, srv493070337=12, srv431101338=9, srv1074924595=0, srv2096944890=7, srv90418650=14, srv1982543325=5, srv32879627=8, srv466146356=11, srv1789984318=4, srv1780605078=3, srv462917789=10} racks are {rack=0} 2024-11-10T12:15:52,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,758 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:52,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:52,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:52,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:52,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:52,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:52,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:52,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:52,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:52,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:52,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-10T12:15:52,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-10T12:15:52,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1206728094=1, srv1245761626=2, srv2037623614=6, srv638135724=13, srv493070337=12, srv431101338=9, srv1074924595=0, srv2096944890=7, srv90418650=14, srv1982543325=5, srv32879627=8, srv466146356=11, srv1789984318=4, srv1780605078=3, srv462917789=10} racks are {rack=0} 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 6 is on rack 0 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1206728094=1, srv1245761626=2, srv2037623614=6, srv638135724=13, srv493070337=12, srv431101338=9, srv1074924595=0, srv2096944890=7, srv90418650=14, srv1982543325=5, srv32879627=8, srv466146356=11, srv1789984318=4, srv1780605078=3, srv462917789=10} racks are {rack=0} 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:52,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1206728094=1, srv1245761626=2, srv2037623614=6, srv638135724=13, srv493070337=12, srv431101338=9, srv1074924595=0, srv2096944890=7, srv90418650=14, srv1982543325=5, srv32879627=8, srv466146356=11, srv1789984318=4, srv1780605078=3, srv462917789=10} racks are {rack=0} 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 6 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:52,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1206728094=1, srv1245761626=2, srv2037623614=6, srv638135724=13, srv493070337=12, srv431101338=9, srv1074924595=0, srv2096944890=7, srv90418650=14, srv1982543325=5, srv32879627=8, srv466146356=11, srv1789984318=4, srv1780605078=3, srv462917789=10} racks are {rack=0} 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1206728094=1, srv1245761626=2, srv2037623614=6, srv638135724=13, srv493070337=12, srv431101338=9, srv1074924595=0, srv2096944890=7, srv90418650=14, srv1982543325=5, srv32879627=8, srv466146356=11, srv1789984318=4, srv1780605078=3, srv462917789=10} racks are {rack=0} 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:52,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 6 is on rack 0 2024-11-10T12:15:52,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1206728094=1, srv1245761626=2, srv2037623614=6, srv638135724=13, srv493070337=12, srv431101338=9, srv1074924595=0, srv2096944890=7, srv90418650=14, srv1982543325=5, srv32879627=8, srv466146356=11, srv1789984318=4, srv1780605078=3, srv462917789=10} racks are {rack=0} 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-10T12:15:52,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1206728094=1, srv1245761626=2, srv2037623614=6, srv638135724=13, srv493070337=12, srv431101338=9, srv1074924595=0, srv2096944890=7, srv90418650=14, srv1982543325=5, srv32879627=8, srv466146356=11, srv1789984318=4, srv1780605078=3, srv462917789=10} racks are {rack=0} 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:52,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 6 is on rack 0 2024-11-10T12:15:52,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:52,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:52,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:52,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:52,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1294914926=1, srv382892187=6, srv1222927212=0, srv1907926830=3, srv808988896=9, srv2113361593=4, srv1789938780=2, srv2126103609=5, srv502572699=7, srv798361115=8} racks are {rack=0} 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,764 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1294914926=1, srv382892187=6, srv1222927212=0, srv1907926830=3, srv808988896=9, srv2113361593=4, srv1789938780=2, srv2126103609=5, srv502572699=7, srv798361115=8} racks are {rack=0} 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
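Note on the BalancerClusterState lines above ("Hosts are {srv...=N, ...} racks are {rack=0}", then "server i is on host i" / "server i is on rack 0"): they describe the integer indexing the balancer builds before costing anything. Every distinct host gets an index, and with no rack topology configured all servers fall on the single default rack 0. Below is a small, purely illustrative sketch of that kind of indexing; the class and variable names are hypothetical and not HBase's internal BalancerClusterState code.

    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    // Hypothetical illustration of the host/rack indexing the BalancerClusterState
    // lines report: one integer index per distinct host, one default rack ("rack" -> 0).
    public class ClusterIndexSketch {
        public static void main(String[] args) {
            // Subset of the server names printed in the "Hosts are {...}" line above.
            List<String> servers = List.of("srv1222927212", "srv1294914926", "srv1789938780");

            Map<String, Integer> hostIndex = new LinkedHashMap<>();
            Map<String, Integer> rackIndex = new LinkedHashMap<>();
            rackIndex.put("rack", 0); // single default rack, as in "racks are {rack=0}"

            for (String server : servers) {
                // In this test every server runs on its own host, so hosts mirror servers.
                if (!hostIndex.containsKey(server)) {
                    hostIndex.put(server, hostIndex.size());
                }
            }

            System.out.println("Hosts are " + hostIndex + " racks are " + rackIndex);
            hostIndex.forEach((host, idx) ->
                System.out.println("server " + idx + " is on host " + idx + " and on rack 0"));
        }
    }

In this test each server is its own host, which is why the logged host indices simply mirror the server indices.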
2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1294914926=1, srv382892187=6, srv1222927212=0, srv1907926830=3, srv808988896=9, srv2113361593=4, srv1789938780=2, srv2126103609=5, srv502572699=7, srv798361115=8} racks are {rack=0} 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,765 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1294914926=1, srv382892187=6, srv1222927212=0, srv1907926830=3, srv808988896=9, srv2113361593=4, srv1789938780=2, srv2126103609=5, srv502572699=7, srv798361115=8} racks are {rack=0} 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1294914926=1, srv382892187=6, srv1222927212=0, srv1907926830=3, srv808988896=9, srv2113361593=4, srv1789938780=2, srv2126103609=5, srv502572699=7, srv798361115=8} racks are {rack=0} 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1294914926=1, srv382892187=6, srv1222927212=0, srv1907926830=3, srv808988896=9, srv2113361593=4, srv1789938780=2, srv2126103609=5, srv502572699=7, srv798361115=8} racks are {rack=0} 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1294914926=1, srv382892187=6, srv1222927212=0, srv1907926830=3, srv808988896=9, srv2113361593=4, srv1789938780=2, srv2126103609=5, srv502572699=7, srv798361115=8} racks are {rack=0} 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
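The skip message repeated above names the knobs it is talking about: the `hbase.master.balancer.stochastic.minCostNeedBalance` threshold (1.0 here) and the per-cost-function multipliers (e.g. 500.0 for RegionCountSkewCostFunction). Below is a minimal sketch, not taken from this test, of how such settings could be put into a Hadoop Configuration; the multiplier key name is an assumption, only `minCostNeedBalance` is quoted verbatim in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

/**
 * Sketch of the tuning the log message suggests: lower minCostNeedBalance and/or
 * raise a cost-function multiplier so the weighted average imbalance can exceed
 * the threshold and a balance plan is actually generated.
 */
public class BalancerTuningSketch {
  public static Configuration aggressiveBalancerConf() {
    Configuration conf = HBaseConfiguration.create();
    // Threshold quoted in the log ("threshold(1.0)"); lowering it makes the
    // balancer act on smaller imbalances.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Assumed key for the RegionCountSkewCostFunction multiplier (the log shows
    // its default weight of 500.0); raising it emphasizes region-count skew.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    return conf;
  }
}
```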
2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1294914926=1, srv382892187=6, srv1222927212=0, srv1907926830=3, srv808988896=9, srv2113361593=4, srv1789938780=2, srv2126103609=5, srv502572699=7, srv798361115=8} racks are {rack=0} 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,767 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1294914926=1, srv382892187=6, srv1222927212=0, srv1907926830=3, srv808988896=9, srv2113361593=4, srv1789938780=2, srv2126103609=5, srv502572699=7, srv798361115=8} racks are {rack=0} 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1294914926=1, srv382892187=6, srv1222927212=0, srv1907926830=3, srv808988896=9, srv2113361593=4, srv1789938780=2, srv2126103609=5, srv502572699=7, srv798361115=8} racks are {rack=0} 2024-11-10T12:15:52,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,768 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:52,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1196355438=1, srv1175873234=0, srv1593214760=4, srv1382300755=2, srv144629477=3, srv2127150812=8, srv181439201=7, srv1791614019=6, srv1777089928=5, srv378627054=9} racks are {rack=0} 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1196355438=1, srv1175873234=0, srv1593214760=4, srv1382300755=2, srv144629477=3, srv2127150812=8, srv181439201=7, srv1791614019=6, srv1777089928=5, srv378627054=9} racks are {rack=0} 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
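The decision logged at StochasticLoadBalancer(421) compares a weighted average of the per-cost-function imbalances against the threshold. The sketch below is an assumption about that comparison, not the actual StochasticLoadBalancer code: weight each "needed" cost function's imbalance by its multiplier, average, and skip balancing when the result does not exceed minCostNeedBalance.

```java
/**
 * Illustrative sketch of the "weighted average imbalance=0.0 <= threshold(1.0)"
 * check. Cost functions reported as "(not needed)" would be excluded from both
 * sums in the real balancer.
 */
final class NeedsBalanceSketch {
  static boolean needsBalance(double[] multipliers, double[] imbalances, double minCostNeedBalance) {
    double weighted = 0.0;
    double total = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      total += multipliers[i];
    }
    double weightedAverageImbalance = total == 0.0 ? 0.0 : weighted / total;
    // With every imbalance at 0.0, as in the log, this is 0.0 <= 1.0, so skip.
    return weightedAverageImbalance > minCostNeedBalance;
  }
}
```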
2024-11-10T12:15:52,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:52,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1196355438=1, srv1175873234=0, srv1593214760=4, srv1382300755=2, srv144629477=3, srv2127150812=8, srv181439201=7, srv1791614019=6, srv1777089928=5, srv378627054=9} racks are {rack=0} 2024-11-10T12:15:52,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,770 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:52,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1196355438=1, srv1175873234=0, srv1593214760=4, srv1382300755=2, srv144629477=3, srv2127150812=8, srv181439201=7, srv1791614019=6, srv1777089928=5, srv378627054=9} racks are {rack=0} 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1196355438=1, srv1175873234=0, srv1593214760=4, srv1382300755=2, srv144629477=3, srv2127150812=8, srv181439201=7, srv1791614019=6, srv1777089928=5, srv378627054=9} racks are {rack=0} 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,771 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:52,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1196355438=1, srv1175873234=0, srv1593214760=4, srv1382300755=2, srv144629477=3, srv2127150812=8, srv181439201=7, srv1791614019=6, srv1777089928=5, srv378627054=9} racks are {rack=0} 2024-11-10T12:15:52,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
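The BalancerClusterState dump just above ("Hosts are {srv...=N} racks are {rack=0}", then "server i is on host i", "server i is on rack 0") reflects dense indexing of hostnames and racks. The following is an illustrative sketch of that indexing under the test's single-rack, one-server-per-host layout; it is an assumption, not the BalancerClusterState implementation.

```java
import java.util.LinkedHashMap;
import java.util.Map;

/**
 * Sketch of dense index assignment: every distinct name gets the next index in
 * encounter order. With one RegionServer per host and a single rack, server i
 * maps to host i and every server maps to rack 0, matching the log lines.
 */
final class ClusterIndexSketch {
  static Map<String, Integer> indexOf(Iterable<String> names) {
    Map<String, Integer> index = new LinkedHashMap<>();
    for (String name : names) {
      index.putIfAbsent(name, index.size()); // first occurrence wins; indices stay dense
    }
    return index;
  }
}
```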
2024-11-10T12:15:52,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,774 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,776 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
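[editor's note] The BalancerClusterState records above ("Hosts are {srv...=N, ...} racks are {rack=0}", "server N is on host N", "server N is on rack 0", "Number of tables=1, number of hosts=10, number of racks=1") describe the indexing step the balancer performs before costing: each server name is mapped to a dense integer index, and parallel lookups record which host and rack each server index belongs to. The sketch below is a minimal, hypothetical illustration of that bookkeeping (class and variable names are invented; this is not HBase's actual BalancerClusterState code). The server names and their index order are copied from the "Hosts are {...}" record in this log.

```java
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Hypothetical sketch of the server -> host -> rack indexing described by the
// BalancerClusterState lines above. Illustrative only, not HBase's implementation.
public class ClusterIndexSketch {
  public static void main(String[] args) {
    // Server names and index order taken from the "Hosts are {...}" record.
    List<String> servers = List.of(
        "srv1115372911", "srv1163004138", "srv1391382639", "srv1796934020",
        "srv331177875", "srv514740541", "srv678501294", "srv759095682",
        "srv80283425", "srv901303069");

    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    Map<String, Integer> rackIndex = new LinkedHashMap<>();
    int[] serverToHost = new int[servers.size()];
    int[] serverToRack = new int[servers.size()];

    for (int i = 0; i < servers.size(); i++) {
      String host = servers.get(i); // in this test every server is its own host
      String rack = "rack";         // a single default rack, as in "racks are {rack=0}"

      Integer h = hostIndex.get(host);
      if (h == null) { h = hostIndex.size(); hostIndex.put(host, h); }
      serverToHost[i] = h;

      Integer r = rackIndex.get(rack);
      if (r == null) { r = rackIndex.size(); rackIndex.put(rack, r); }
      serverToRack[i] = r;

      System.out.println("server " + i + " is on host " + serverToHost[i]
          + " and rack " + serverToRack[i]);
    }
    System.out.println("Number of hosts=" + hostIndex.size()
        + ", number of racks=" + rackIndex.size());
  }
}
```

Because every server resolves to its own host and all ten share one rack, the output matches the counts logged here: 10 hosts, 1 rack.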
2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,778 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-10T12:15:52,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,779 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
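[editor's note] The "skipping load balancing" record that follows (and repeats for every table in this run) reflects the gating check: an imbalance figure aggregated across the individual cost functions, weighted by their multipliers, is compared against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here); with every logged imbalance at 0.0 the result is 0.0, so no plan is generated. The sketch below is one plausible reading of "weighted average imbalance" under that assumption; the exact formula is not shown in the log, the class name is invented, and this is a simplified stand-in rather than StochasticLoadBalancer's actual code. The multipliers and imbalances are copied from the functionCost=... output above; the "(not needed)" functions are omitted, matching their status in the log.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Simplified stand-in for the check behind the "skipping load balancing" message:
// a multiplier-weighted average of per-cost-function imbalances compared against
// minCostNeedBalance. Values are taken from the functionCost=... output in this log.
public class NeedsBalanceSketch {
  public static void main(String[] args) {
    Map<String, double[]> cost = new LinkedHashMap<>(); // name -> {multiplier, imbalance}
    cost.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
    cost.put("MoveCostFunction",            new double[] {7.0,   0.0});
    cost.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
    cost.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
    cost.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
    cost.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
    cost.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
    cost.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

    // hbase.master.balancer.stochastic.minCostNeedBalance, 1.0 in this run
    double minCostNeedBalance = 1.0;

    double weightedSum = 0.0, multiplierSum = 0.0;
    for (double[] mi : cost.values()) {
      weightedSum += mi[0] * mi[1];
      multiplierSum += mi[0];
    }
    double weightedAverageImbalance = weightedSum / multiplierSum;

    System.out.printf("weighted average imbalance=%.1f <= threshold(%.1f)%n",
        weightedAverageImbalance, minCostNeedBalance);
    if (weightedAverageImbalance <= minCostNeedBalance) {
      System.out.println("skipping load balancing");
    }
  }
}
```

Per the log's own suggestion, more aggressive balancing would come from lowering hbase.master.balancer.stochastic.minCostNeedBalance below 1.0 or raising the multipliers of specific cost functions; the configuration keys for those multipliers are not named in this log, so they are not shown here.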
2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,780 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,781 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
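Aside on the cluster state being logged: for every table, the BalancerClusterState lines above map ten servers onto ten hosts and a single rack ("server N is on host N", "server N is on rack 0", "Number of tables=1, number of hosts=10, number of racks=1"). The following Java sketch is only an illustration of that bookkeeping under the same assumptions (one server per host, one rack); the class name ClusterTopologySketch and its variables are hypothetical and are not part of HBase's BalancerClusterState API.

import java.util.LinkedHashMap;
import java.util.Map;

/**
 * Illustrative only: mirrors the "server N is on host N" / "server N is on rack 0"
 * bookkeeping that BalancerClusterState logs above. Names are hypothetical, not HBase API.
 */
public class ClusterTopologySketch {

    public static void main(String[] args) {
        // Host map as printed in the log: server name -> server index. All hosts sit on the single rack 0.
        Map<String, Integer> hosts = new LinkedHashMap<>();
        hosts.put("srv1115372911", 0);
        hosts.put("srv1163004138", 1);
        hosts.put("srv1391382639", 2);
        hosts.put("srv1796934020", 3);
        hosts.put("srv331177875", 4);
        hosts.put("srv514740541", 5);
        hosts.put("srv678501294", 6);
        hosts.put("srv759095682", 7);
        hosts.put("srv80283425", 8);
        hosts.put("srv901303069", 9);

        int numServers = hosts.size();
        int[] serverToHost = new int[numServers];
        int[] serverToRack = new int[numServers];

        // One server per host and one rack in this test, so host index == server index and rack index == 0.
        for (Map.Entry<String, Integer> e : hosts.entrySet()) {
            int serverIndex = e.getValue();
            serverToHost[serverIndex] = serverIndex; // "server N is on host N"
            serverToRack[serverIndex] = 0;           // "server N is on rack 0"
            System.out.println("server " + serverIndex + " is on host " + serverToHost[serverIndex]
                + " and rack " + serverToRack[serverIndex]);
        }

        System.out.println("Number of hosts=" + numServers + ", number of racks=1");
    }
}

Running this reproduces the per-server host/rack lines and the host/rack counts that the log repeats for each table.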
2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,782 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-10T12:15:52,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,783 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,784 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,785 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:52,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,787 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,787 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
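Editor's note: the repeated "skipping load balancing" entries above report a weighted average imbalance that is compared against threshold(1.0), i.e. hbase.master.balancer.stochastic.minCostNeedBalance, with each cost function contributing its imbalance scaled by its multiplier (the functionCost= listing). The following is a minimal, hypothetical Java sketch of that comparison, written only to illustrate the arithmetic the log message describes; it is not the StochasticLoadBalancer implementation, and the class and method names are invented.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Hypothetical illustration of the "weighted average imbalance <= threshold" check
// reported by StochasticLoadBalancer; names and structure are invented for clarity.
public class WeightedImbalanceSketch {

    /** Weighted average of per-cost-function imbalances, weighted by their multipliers. */
    static double weightedAverageImbalance(Map<String, double[]> functions) {
        double weightedSum = 0.0;
        double weightTotal = 0.0;
        for (double[] multiplierAndImbalance : functions.values()) {
            double multiplier = multiplierAndImbalance[0];
            double imbalance = multiplierAndImbalance[1];
            weightedSum += multiplier * imbalance;
            weightTotal += multiplier;
        }
        return weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;
    }

    public static void main(String[] args) {
        // Multipliers and imbalances taken from the functionCost= listing in the log.
        Map<String, double[]> functions = new LinkedHashMap<>();
        functions.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        functions.put("MoveCostFunction",            new double[] {7.0,   0.0});
        functions.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        functions.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        functions.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        functions.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        functions.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        functions.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

        double minCostNeedBalance = 1.0; // threshold(1.0) quoted in the log
        double imbalance = weightedAverageImbalance(functions);
        if (imbalance <= minCostNeedBalance) {
            System.out.println("skipping load balancing: imbalance=" + imbalance
                + " <= threshold(" + minCostNeedBalance + ")");
        }
    }
}
```

With every per-function imbalance at 0.0, the weighted average is 0.0, which is why each table in this run is skipped.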
2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,789 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
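Editor's note: each skip message also repeats the two tuning options: lower hbase.master.balancer.stochastic.minCostNeedBalance below 1.0, or raise the multiplier of the cost function you care about. Below is a hedged sketch of supplying such overrides through the HBase client configuration; the minCostNeedBalance key is quoted verbatim from the log, while the regionCountCost key is an assumption about the RegionCountSkewCostFunction multiplier and should be checked against the HBase version in use. In practice these values are normally set in hbase-site.xml on the master rather than in code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of overriding the balancer thresholds mentioned in the log message.
// Property names other than minCostNeedBalance are assumptions, not confirmed here.
public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Key quoted verbatim in the log; the default threshold is 1.0.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Assumed key for the RegionCountSkewCostFunction multiplier (500.0 in the log).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println("minCostNeedBalance = "
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}
```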
2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,791 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-10T12:15:52,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
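Editor's note: the BalancerClusterState lines above ("Hosts are {...} racks are {rack=0}", "server N is on host N", "server N is on rack 0", "Number of tables=1, number of hosts=10, number of racks=1") describe how the cluster state indexes every server by host and rack before costing begins; with ten single-server hosts in one rack, each server gets its own host index and all share rack 0. The sketch below is a simplified, hypothetical reconstruction of that bookkeeping using the server names from the log; it is not HBase's BalancerClusterState class.

```java
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Simplified illustration of mapping servers -> host index -> rack index,
// mirroring the "server N is on host N / rack 0" lines; not HBase's actual class.
public class ClusterIndexSketch {
    public static void main(String[] args) {
        // Server names in the index order reported by the log (srv...=0 through srv...=9).
        List<String> servers = List.of(
            "srv1115372911", "srv1163004138", "srv1391382639", "srv1796934020",
            "srv331177875", "srv514740541", "srv678501294", "srv759095682",
            "srv80283425", "srv901303069");

        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        List<Integer> serverToHost = new ArrayList<>();
        List<Integer> serverToRack = new ArrayList<>();

        for (String server : servers) {
            // Every server runs on its own host here, so each gets a fresh host index.
            hostIndex.putIfAbsent(server, hostIndex.size());
            serverToHost.add(hostIndex.get(server));
            serverToRack.add(0); // single rack, as in "racks are {rack=0}"
        }

        for (int i = 0; i < servers.size(); i++) {
            System.out.println("server " + i + " is on host " + serverToHost.get(i)
                + ", rack " + serverToRack.get(i));
        }
        System.out.println("Number of hosts=" + hostIndex.size() + ", number of racks=1");
    }
}
```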
2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,793 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
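The BalancerClusterState lines above enumerate a host index and a rack index for every server; in this test each server is its own host and the whole cluster sits on a single rack, so every server maps to rack 0. Below is a minimal, purely illustrative Java sketch of that kind of bookkeeping. The class and variable names are hypothetical and this is not the HBase implementation.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

/**
 * Illustrative sketch only: mirrors the sort of server -> host-index and
 * server -> rack-index bookkeeping the BalancerClusterState DEBUG lines report.
 */
public class ClusterIndexSketch {
    public static void main(String[] args) {
        // A few of the server names seen in the log; one host per server here.
        List<String> servers = List.of("srv759095682", "srv80283425", "srv331177875");
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        Map<String, Integer> rackIndex = new LinkedHashMap<>();
        List<Integer> serverToHost = new ArrayList<>();
        List<Integer> serverToRack = new ArrayList<>();

        for (int i = 0; i < servers.size(); i++) {
            String server = servers.get(i);
            // Each distinct host gets the next free index.
            int h = hostIndex.computeIfAbsent(server, s -> hostIndex.size());
            // A single rack ("rack") for the whole cluster, hence index 0 everywhere.
            int r = rackIndex.computeIfAbsent("rack", s -> rackIndex.size());
            serverToHost.add(h);
            serverToRack.add(r);
            System.out.println("server " + i + " is on host " + h + " and rack " + r);
        }
        System.out.println("hosts=" + hostIndex.size() + ", racks=" + rackIndex.size());
    }
}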
2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,795 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv759095682=7, srv80283425=8, srv331177875=4, srv1115372911=0, srv514740541=5, srv901303069=9, srv1163004138=1, srv1391382639=2, srv1796934020=3, srv678501294=6} racks are {rack=0} 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,796 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-10T12:15:52,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
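The recurring StochasticLoadBalancer INFO entries report a weighted average imbalance of 0.0 against a threshold of 1.0, derived from the (multiplier, imbalance) pairs listed under functionCost. The sketch below shows, as a simplification and not the actual HBase logic, how such a gate could be evaluated from those pairs; the method and class names are hypothetical, and the multipliers are the ones printed in these log lines.

import java.util.LinkedHashMap;
import java.util.Map;

/** Illustrative sketch only: a weighted-average gate of the kind the INFO lines describe. */
public class BalanceGateSketch {

    /** Returns true when balancing should be skipped for this table. */
    static boolean skipBalancing(Map<String, double[]> functionCost, double minCostNeedBalance) {
        double weighted = 0.0;
        double multiplierSum = 0.0;
        for (double[] entry : functionCost.values()) {
            double multiplier = entry[0];
            double imbalance = entry[1];
            weighted += multiplier * imbalance;
            multiplierSum += multiplier;
        }
        double weightedAverageImbalance = multiplierSum == 0 ? 0.0 : weighted / multiplierSum;
        // Skip when the weighted average imbalance does not exceed the threshold,
        // matching the "imbalance=0.0 <= threshold(1.0)" pattern in the log.
        return weightedAverageImbalance <= minCostNeedBalance;
    }

    public static void main(String[] args) {
        // (multiplier, imbalance) pairs taken from the functionCost listing above.
        Map<String, double[]> functionCost = new LinkedHashMap<>();
        functionCost.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        functionCost.put("MoveCostFunction", new double[] {7.0, 0.0});
        functionCost.put("RackLocalityCostFunction", new double[] {15.0, 0.0});
        functionCost.put("TableSkewCostFunction", new double[] {35.0, 0.0});
        functionCost.put("ReadRequestCostFunction", new double[] {5.0, 0.0});

        // threshold(1.0) in the log corresponds to minCostNeedBalance = 1.0 here.
        System.out.println("skip=" + skipBalancing(functionCost, 1.0)); // prints skip=true
    }
}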
2024-11-10T12:15:52,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,798 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-10T12:15:52,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,799 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
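The repeated DEBUG entry "Slop is less than zero, not checking for sloppiness." indicates the slop-based pre-check is bypassed because the configured slop is negative. The sketch below shows, under stated assumptions, what such a check might look like: a negative slop short-circuits it, otherwise a server whose region count falls outside a +/- slop band around the average would count as sloppy. This is illustrative only, not the HBase code; the slop value is typically configured via hbase.regions.slop.

import java.util.List;

/** Illustrative sketch only: a simplified slop ("sloppiness") check. */
public class SlopCheckSketch {

    static boolean sloppyServerExists(List<Integer> regionsPerServer, float slop) {
        if (slop < 0) {
            // Mirrors "Slop is less than zero, not checking for sloppiness."
            return false;
        }
        double average = regionsPerServer.stream().mapToInt(Integer::intValue).average().orElse(0);
        int floor = (int) Math.floor(average * (1 - slop));
        int ceiling = (int) Math.ceil(average * (1 + slop));
        // Sloppy if any server carries noticeably fewer or more regions than the band allows.
        return regionsPerServer.stream().anyMatch(r -> r < floor || r > ceiling);
    }

    public static void main(String[] args) {
        List<Integer> regions = List.of(12, 12, 11, 13, 12);
        System.out.println(sloppyServerExists(regions, -1.0f)); // false: check bypassed
        System.out.println(sloppyServerExists(regions, 0.2f));  // false: all within the band
    }
}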
2024-11-10T12:15:52,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,800 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,801 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-10T12:15:52,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,803 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,803 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
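The BalancerClusterState records above repeatedly print "server N is on host N" and "server N is on rack 0" because this test runs one server per host and declares only a single rack. The following Java snippet is a minimal illustrative sketch, not the actual HBase BalancerClusterState code; the class name, collections, and the sample server names are assumptions made only to show how such a dense server-to-host/rack index mapping can produce exactly these log lines.

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative sketch only: assigns each server a dense host index and maps every
// host to the single known rack, mirroring the "server i is on host i / rack 0" records.
public class ClusterIndexSketch {

    public static void main(String[] args) {
        // Hypothetical server names standing in for the srvNNN entries in the log.
        List<String> servers = List.of("srv412143967", "srv742657439", "srv1157885725");

        Map<String, Integer> hostIndex = new HashMap<>(); // host name -> dense index
        List<Integer> serverToHost = new ArrayList<>();   // server i -> host index
        List<Integer> serverToRack = new ArrayList<>();   // server i -> rack index

        for (String server : servers) {
            // With one server per host, each server gets a fresh host index,
            // which is why the log prints "server i is on host i".
            int host = hostIndex.computeIfAbsent(server, s -> hostIndex.size());
            serverToHost.add(host);
            // A single rack means every server lands on rack index 0.
            serverToRack.add(0);
        }

        for (int i = 0; i < servers.size(); i++) {
            System.out.println("server " + i + " is on host " + serverToHost.get(i));
        }
        for (int i = 0; i < servers.size(); i++) {
            System.out.println("server " + i + " is on rack " + serverToRack.get(i));
        }
    }
}
```
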
2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,805 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,806 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,807 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
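The StochasticLoadBalancer(421) entries repeated throughout this section all describe the same skip rule: the balancer folds every cost function's imbalance into one weighted average and only generates a plan when that average exceeds hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here), so with every imbalance at 0.0 each table is skipped. Below is a minimal sketch of that comparison, assuming the weighted average is sum(multiplier * imbalance) / sum(multiplier); the class is illustrative, not the HBase source, and the "(not needed)" cost functions are simply left out.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative stand-in for the skip check logged by StochasticLoadBalancer(421).
// Assumption: weighted average imbalance = sum(multiplier * imbalance) / sum(multiplier).
public final class NeedsBalanceSketch {

    /** costFunctions maps a name to { multiplier, imbalance }. */
    static boolean needsBalance(Map<String, double[]> costFunctions, double minCostNeedBalance) {
        double weighted = 0.0;
        double multiplierSum = 0.0;
        for (double[] mi : costFunctions.values()) {
            weighted += mi[0] * mi[1];
            multiplierSum += mi[0];
        }
        double average = multiplierSum == 0.0 ? 0.0 : weighted / multiplierSum;
        return average > minCostNeedBalance;
    }

    public static void main(String[] args) {
        // Multipliers copied from the functionCost=... entry; every imbalance is 0.0 in this
        // test, and the "(not needed)" cost functions are simply omitted.
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[] { 500.0, 0.0 });
        costs.put("MoveCostFunction",            new double[] {   7.0, 0.0 });
        costs.put("RackLocalityCostFunction",    new double[] {  15.0, 0.0 });
        costs.put("TableSkewCostFunction",       new double[] {  35.0, 0.0 });
        costs.put("ReadRequestCostFunction",     new double[] {   5.0, 0.0 });
        costs.put("WriteRequestCostFunction",    new double[] {   5.0, 0.0 });
        costs.put("MemStoreSizeCostFunction",    new double[] {   5.0, 0.0 });
        costs.put("StoreFileCostFunction",       new double[] {   5.0, 0.0 });
        // 0.0 <= threshold(1.0) -> skip, matching the "skipping load balancing" entries.
        System.out.println("needsBalance = " + needsBalance(costs, 1.0));
    }
}
```

With the multipliers from the functionCost line and all imbalances at zero, the sketch prints needsBalance = false, which matches the skip decision logged for every table in this run.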
2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,809 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-10T12:15:52,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,810 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
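The BalancerClusterState entries on either side of this point show how the ten region servers are indexed before any cost is computed: each srvNNN name gets a host index (the {srv...=N} map in the "Hosts are" entry), every server is its own host, and since only the single default rack is known every server lands on rack 0, which is why each block reports "Number of tables=1, number of hosts=10, number of racks=1". A small illustrative sketch of that indexing (not the BalancerClusterState implementation):

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative indexing of servers to host and rack indices, mirroring the
// "Hosts are {...} racks are {rack=0}" and "server N is on host N / rack 0" entries.
public final class ClusterIndexSketch {
    public static void main(String[] args) {
        // Listed in the index order shown by the log's {srv...=N} map.
        String[] servers = {
            "srv1157885725", "srv1385725417", "srv1524961767", "srv34205990", "srv349777218",
            "srv412143967", "srv564664774", "srv690926938", "srv742657439", "srv920029321"
        };

        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        int[] serverToHost = new int[servers.length];
        int[] serverToRack = new int[servers.length];

        for (int i = 0; i < servers.length; i++) {
            // Each server name is a distinct host in this test, so host index == server index.
            Integer idx = hostIndex.get(servers[i]);
            if (idx == null) {
                idx = hostIndex.size();
                hostIndex.put(servers[i], idx);
            }
            serverToHost[i] = idx;
            // Only one rack ("rack") is known, so every server ends up on rack 0.
            serverToRack[i] = 0;
            System.out.printf("server %d is on host %d, rack %d%n", i, serverToHost[i], serverToRack[i]);
        }
        System.out.println("number of hosts=" + hostIndex.size() + ", number of racks=1");
    }
}
```

Running it prints the same "server N is on host N" / "rack 0" pairs the log shows for every table block.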
2024-11-10T12:15:52,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:52,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
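The skip message names two knobs for more aggressive balancing: lower hbase.master.balancer.stochastic.minCostNeedBalance (the only property spelled out in the log) or raise the multiplier of the cost function you care about. A sketch of setting both on a Hadoop Configuration follows; the multiplier key hbase.master.balancer.stochastic.regionCountCost is an assumption inferred from the RegionCountSkewCostFunction multiplier of 500.0 above, so confirm it against your HBase version.

```java
import org.apache.hadoop.conf.Configuration;

// Sketch only: where the two knobs named in the skip message would be set.
public final class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Named explicitly in the log; default 1.0. Lowering it lets smaller
        // weighted average imbalances trigger a balance plan.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Assumed key for RegionCountSkewCostFunction's multiplier (500.0 in the log);
        // verify the property name against your HBase release before relying on it.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}
```

In a real deployment these values would normally live in hbase-site.xml on the master rather than be set programmatically.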
2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:52,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,812 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
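Every block also logs BaseLoadBalancer(253): "Slop is less than zero, not checking for sloppiness." That pre-check looks for servers whose region count strays too far from the per-server average and is disabled outright when the configured slop is negative, as in this test. A sketch of such a check under the usual floor/ceiling assumption average*(1±slop); both the formula and any property name behind it are assumptions, not something this log states.

```java
// Illustrative sloppiness pre-check; skipped entirely when slop < 0,
// which is what "Slop is less than zero, not checking for sloppiness." reports.
public final class SlopCheckSketch {

    // Assumption for this sketch: a server is "sloppy" if its region count is
    // outside [average * (1 - slop), average * (1 + slop)].
    static boolean sloppyServerExists(int[] regionsPerServer, float slop) {
        if (slop < 0) {
            return false; // check disabled, as in the log
        }
        double total = 0;
        for (int c : regionsPerServer) {
            total += c;
        }
        double average = total / regionsPerServer.length;
        double floor = Math.floor(average * (1 - slop));
        double ceiling = Math.ceil(average * (1 + slop));
        for (int c : regionsPerServer) {
            if (c < floor || c > ceiling) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        int[] counts = { 12, 12, 11, 13, 12, 12, 12, 11, 13, 12 };
        System.out.println(sloppyServerExists(counts, -1.0f)); // false: negative slop skips the check
        System.out.println(sloppyServerExists(counts, 0.0f));  // true: any deviation counts when slop=0
    }
}
```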
2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,814 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,815 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
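Note: the repeated "skipping load balancing" entries above compare a weighted average imbalance of 0.0 against the threshold hbase.master.balancer.stochastic.minCostNeedBalance, which is 1.0 in this test run. A minimal Java sketch of how that threshold, or one of the cost-function multipliers named in the functionCost listing, could be changed through the standard HBase Configuration API (the 0.05f and 1000f values are arbitrary illustrations, not recommendations):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  // Returns a Configuration with a lower "needs balance" threshold and a heavier
  // region-count multiplier, matching the two options suggested by the log message.
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Threshold the log compares the weighted average imbalance against (1.0 above).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Multiplier behind "RegionCountSkewCostFunction : (multiplier=500.0, ...)" above.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    return conf;
  }
}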
2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,816 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,817 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
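Note: the "weighted average imbalance=0.0" figure appears to be the multiplier-weighted mean of the per-function imbalance values listed under functionCost. The sketch below works through that reading of the arithmetic; it is an illustration under that assumption, not a quotation of the StochasticLoadBalancer source:

// Hypothetical helper: multiplier-weighted mean of the imbalance values from a
// functionCost listing such as the one above. With every imbalance at 0.0, the
// result is 0.0, which is <= threshold(1.0), so plan generation is skipped.
public class WeightedImbalanceSketch {
  static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
    double weighted = 0.0;
    double totalWeight = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      totalWeight += multipliers[i];
    }
    return totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
  }
}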
2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,819 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,819 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-10T12:15:52,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
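Note: each "Start Generate Balance plan for table: ..." entry is the balancer being invoked table by table. A short sketch of the standard HBase flag that selects per-table balancing, shown only to connect these entries to a concrete setting (enabling it here is an example, not advice):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ByTableBalancingSketch {
  public static Configuration byTableConf() {
    // When true, the master's load balancer computes a separate plan per table,
    // which is why the log above cycles through table6, table7, table8, and so on.
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.master.loadbalance.bytable", true);
    return conf;
  }
}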
2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,821 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv412143967=5, srv742657439=8, srv1157885725=0, srv1385725417=1, srv564664774=6, srv34205990=3, srv690926938=7, srv920029321=9, srv1524961767=2, srv349777218=4} racks are {rack=0} 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,822 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
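Note on the repeated StochasticLoadBalancer(421) skip messages above: the balancer compares a "weighted average imbalance" against the hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0 in this test run). The following is a minimal, hypothetical sketch (not HBase source); it assumes the weighted average is the multiplier-weighted mean of the per-cost-function imbalances listed under functionCost=, and the class and variable names are invented. Only the property name, the multipliers, and the threshold come from the log lines themselves.

// Hypothetical illustration of the skip check logged above; not HBase code.
public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        // Multipliers and imbalances copied from one functionCost= line above
        // (cost functions reported as "not needed" are omitted).
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};

        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weightedSum += multipliers[i] * imbalances[i];
            multiplierSum += multipliers[i];
        }
        double weightedAverageImbalance = weightedSum / multiplierSum; // 0.0 here

        double minCostNeedBalance = 1.0; // "threshold(1.0)" reported in the log
        if (weightedAverageImbalance <= minCostNeedBalance) {
            System.out.println("skipping load balancing: " + weightedAverageImbalance
                + " <= " + minCostNeedBalance);
        }
    }
}

With every imbalance at 0.0, the weighted average is 0.0, which is <= 1.0, so each per-table balance plan above is generated and then immediately skipped.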
2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,824 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,825 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,826 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,827 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,828 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,829 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
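
The skip message repeated for each table above reports a "weighted average imbalance" and a "threshold(1.0)". A minimal, hedged sketch of that comparison, built only from the multipliers and imbalances printed in the functionCost= line of this log (class and method names here are illustrative, not HBase internals):

// Hedged sketch (not HBase source): reproducing the skip decision from the
// multipliers and imbalances printed in the functionCost= line above.
// Functions reported as "(not needed)" are omitted.
import java.util.LinkedHashMap;
import java.util.Map;

public class NeedsBalanceSketch {
    // Each entry: cost function name -> {multiplier, imbalance}.
    static double weightedAverageImbalance(Map<String, double[]> functionCost) {
        double weightedSum = 0.0;
        double weightTotal = 0.0;
        for (double[] mi : functionCost.values()) {
            weightedSum += mi[0] * mi[1]; // multiplier * imbalance
            weightTotal += mi[0];
        }
        return weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;
    }

    public static void main(String[] args) {
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        costs.put("MoveCostFunction",            new double[] {7.0,   0.0});
        costs.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        costs.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        costs.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        costs.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        costs.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        costs.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

        double imbalance = weightedAverageImbalance(costs);
        double minCostNeedBalance = 1.0; // "threshold(1.0)" in the log
        // Matches the log: 0.0 <= 1.0, so the balancer skips this table.
        System.out.println("weighted average imbalance=" + imbalance
            + " <= threshold(" + minCostNeedBalance + "): skip="
            + (imbalance <= minCostNeedBalance));
    }
}
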
2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,831 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
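
The log message suggests two tuning knobs: lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising a cost function's relative multiplier. A hedged sketch of how that might be applied to a Configuration in a test or master setup; the minCostNeedBalance key is quoted from the log itself, while the multiplier key below is an assumption to be checked against the HBase version in use:

// Hedged sketch: applying the tuning hinted at in the log message.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Lower the threshold so smaller imbalances still trigger a balance plan
        // (key name taken verbatim from the log message above).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Or raise the relative weight of a specific cost function
        // (this multiplier key name is an assumption, not confirmed by the log).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        System.out.println(
            conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}
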
2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,833 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-10T12:15:52,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,834 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,835 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,836 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:52,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,837 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:52,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,838 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,840 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,842 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,843 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
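Every "skipping load balancing" entry above reports the same decision: the weighted average of the per-cost-function imbalances did not exceed the minCostNeedBalance threshold of 1.0, so no plan is generated for that table. A minimal sketch of that comparison follows, assuming the weighted average is simply the sum of multiplier times imbalance divided by the sum of multipliers over the functions not marked "(not needed)"; the class, record, and method names are illustrative, not HBase's actual internals.

```java
import java.util.List;

// Illustrative only: models the check logged as
// "weighted average imbalance=0.0 <= threshold(1.0)".
final class BalanceDecisionSketch {

    record CostFunction(String name, double multiplier, double imbalance, boolean needed) {}

    /** Returns true when a balance plan should be generated for the table. */
    static boolean needsBalance(List<CostFunction> costs, double minCostNeedBalance) {
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (CostFunction c : costs) {
            if (!c.needed()) {
                continue; // functions logged as "(not needed)" do not contribute
            }
            weightedSum += c.multiplier() * c.imbalance();
            multiplierSum += c.multiplier();
        }
        double weightedAverageImbalance = multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
        return weightedAverageImbalance > minCostNeedBalance;
    }

    public static void main(String[] args) {
        // Multipliers taken from the functionCost breakdown above; all imbalances are 0.0 here.
        List<CostFunction> costs = List.of(
            new CostFunction("RegionCountSkewCostFunction", 500.0, 0.0, true),
            new CostFunction("PrimaryRegionCountSkewCostFunction", 500.0, 0.0, false),
            new CostFunction("MoveCostFunction", 7.0, 0.0, true),
            new CostFunction("RackLocalityCostFunction", 15.0, 0.0, true),
            new CostFunction("TableSkewCostFunction", 35.0, 0.0, true),
            new CostFunction("ReadRequestCostFunction", 5.0, 0.0, true),
            new CostFunction("WriteRequestCostFunction", 5.0, 0.0, true),
            new CostFunction("MemStoreSizeCostFunction", 5.0, 0.0, true),
            new CostFunction("StoreFileCostFunction", 5.0, 0.0, true));
        // 0.0 <= 1.0, so balancing is skipped, matching the log output.
        System.out.println("needsBalance = " + needsBalance(costs, 1.0));
    }
}
```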
2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,844 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,845 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
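The BalancerClusterState lines repeat the same cluster snapshot for each table: ten servers, each mapped to its own host index, all on a single rack. A small sketch of building that kind of index bookkeeping from server names is shown below, under the assumption (consistent with the log) that host and rack indices are assigned in order of first appearance; the class and variable names are illustrative, not the actual BalancerClusterState fields.

```java
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Illustrative reconstruction of the "Hosts are {...} racks are {rack=0}" bookkeeping.
final class ClusterIndexSketch {

    public static void main(String[] args) {
        List<String> servers = List.of(
            "srv1025172841", "srv1670365424", "srv1803258889", "srv1955278324", "srv211100724",
            "srv236092786", "srv238331686", "srv314763403", "srv773514813", "srv922499564");

        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        Map<String, Integer> rackIndex = new LinkedHashMap<>();

        for (int server = 0; server < servers.size(); server++) {
            // In this test every server is its own host, so host index == server index.
            int host = hostIndex.computeIfAbsent(servers.get(server), s -> hostIndex.size());
            // All servers share one rack, so every server resolves to rack 0.
            int rack = rackIndex.computeIfAbsent("rack", r -> rackIndex.size());
            System.out.println("server " + server + " is on host " + host + " and rack " + rack);
        }
        System.out.println("Number of hosts=" + hostIndex.size()
            + ", number of racks=" + rackIndex.size());
    }
}
```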
2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,847 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,847 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-10T12:15:52,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv238331686=6, srv1803258889=2, srv211100724=4, srv773514813=8, srv1025172841=0, srv236092786=5, srv1955278324=3, srv314763403=7, srv922499564=9, srv1670365424=1} racks are {rack=0} 2024-11-10T12:15:52,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv671357767=9, srv1131626037=1, srv1981717255=8, srv1035499714=0, srv1754824089=5, srv184911725=7, srv1785754483=6, srv1150981628=2, srv133930121=3, srv1599497826=4} racks are {rack=0} 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
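The advice repeated in each "skipping load balancing" message points at two knobs: lower hbase.master.balancer.stochastic.minCostNeedBalance, or raise the multiplier of a specific cost function so it dominates the weighted average. A hedged sketch of setting both through the client Configuration API follows; the minCostNeedBalance key is quoted verbatim from the log, while hbase.master.balancer.stochastic.regionCountCost is assumed here to be the RegionCountSkewCostFunction multiplier key, and the chosen values are examples only.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: make the stochastic balancer more aggressive than the defaults seen in this test.
public final class BalancerTuningSketch {

    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Key quoted directly in the log; the default of 1.0 suppresses balancing
        // whenever the weighted average imbalance stays at or below 1.0.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Assumed key for the RegionCountSkewCostFunction multiplier (logged as 500.0);
        // raising it increases that function's share of the weighted average.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);

        System.out.println("minCostNeedBalance = "
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}
```

In practice these properties would typically be set in hbase-site.xml on the HMaster rather than programmatically; the Java form above is just a compact way to show the property keys and value types.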
2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv671357767=9, srv1131626037=1, srv1981717255=8, srv1035499714=0, srv1754824089=5, srv184911725=7, srv1785754483=6, srv1150981628=2, srv133930121=3, srv1599497826=4} racks are {rack=0} 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,849 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv671357767=9, srv1131626037=1, srv1981717255=8, srv1035499714=0, srv1754824089=5, srv184911725=7, srv1785754483=6, srv1150981628=2, srv133930121=3, srv1599497826=4} racks are {rack=0} 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv671357767=9, srv1131626037=1, srv1981717255=8, srv1035499714=0, srv1754824089=5, srv184911725=7, srv1785754483=6, srv1150981628=2, srv133930121=3, srv1599497826=4} racks are {rack=0} 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv671357767=9, srv1131626037=1, srv1981717255=8, srv1035499714=0, srv1754824089=5, srv184911725=7, srv1785754483=6, srv1150981628=2, srv133930121=3, srv1599497826=4} racks are {rack=0} 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
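Editor's note on the skip message above: it names the two tuning knobs directly, lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising a specific cost function's multiplier. The following is a minimal, hedged sketch of how those settings could be applied through the Hadoop Configuration API; the minCostNeedBalance key is taken verbatim from the log, while the regionCountCost key is an assumption inferred from the RegionCountSkewCostFunction default (500.0) shown in the functionCost= line, not something this log confirms.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: set the knobs named in the log message above before the balancer
// reads its configuration.
public final class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Lower the threshold so a smaller weighted average imbalance triggers balancing
    // (the log shows the default threshold of 1.0).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Or raise a specific cost function's relative multiplier instead
    // (assumed key for the RegionCountSkewCostFunction multiplier).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);
    System.out.println("minCostNeedBalance="
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}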
2024-11-10T12:15:52,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv671357767=9, srv1131626037=1, srv1981717255=8, srv1035499714=0, srv1754824089=5, srv184911725=7, srv1785754483=6, srv1150981628=2, srv133930121=3, srv1599497826=4} racks are {rack=0} 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv671357767=9, srv1131626037=1, srv1981717255=8, srv1035499714=0, srv1754824089=5, srv184911725=7, srv1785754483=6, srv1150981628=2, srv133930121=3, srv1599497826=4} racks are {rack=0} 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,851 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:52,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv671357767=9, srv1131626037=1, srv1981717255=8, srv1035499714=0, srv1754824089=5, srv184911725=7, srv1785754483=6, srv1150981628=2, srv133930121=3, srv1599497826=4} racks are {rack=0} 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv671357767=9, srv1131626037=1, srv1981717255=8, srv1035499714=0, srv1754824089=5, srv184911725=7, srv1785754483=6, srv1150981628=2, srv133930121=3, srv1599497826=4} racks are {rack=0} 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv671357767=9, srv1131626037=1, srv1981717255=8, srv1035499714=0, srv1754824089=5, srv184911725=7, srv1785754483=6, srv1150981628=2, srv133930121=3, srv1599497826=4} racks are {rack=0} 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv671357767=9, srv1131626037=1, srv1981717255=8, srv1035499714=0, srv1754824089=5, srv184911725=7, srv1785754483=6, srv1150981628=2, srv133930121=3, srv1599497826=4} racks are {rack=0} 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
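Editor's note on the "weighted average imbalance=0.0 <= threshold(1.0)" lines above: read together with the functionCost= list, the value is plausibly the multiplier-weighted mean of the per-cost-function imbalances. The self-contained sketch below is an assumption about that arithmetic, not the HBase source; it reproduces 0.0 from the multipliers and imbalances printed in the log.

// Hypothetical sketch: derive a "weighted average imbalance" from per-cost-function
// multipliers and imbalance values, then compare it to minCostNeedBalance.
public final class WeightedImbalanceSketch {
  /** Returns sum(multiplier_i * imbalance_i) / sum(multiplier_i), or 0 if no weights. */
  static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
    double weighted = 0.0;
    double totalWeight = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      totalWeight += multipliers[i];
    }
    return totalWeight > 0 ? weighted / totalWeight : 0.0;
  }

  public static void main(String[] args) {
    // Values taken from the functionCost= line above (cost functions marked
    // "not needed" are omitted): every imbalance is 0.0, so the weighted average
    // is 0.0 <= threshold(1.0) and balancing is skipped.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
    double avg = weightedAverageImbalance(multipliers, imbalances);
    double minCostNeedBalance = 1.0; // threshold(1.0) from the log message
    System.out.println("weighted average imbalance=" + avg
        + ", needs balance=" + (avg > minCostNeedBalance));
  }
}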
2024-11-10T12:15:52,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv671357767=9, srv1131626037=1, srv1981717255=8, srv1035499714=0, srv1754824089=5, srv184911725=7, srv1785754483=6, srv1150981628=2, srv133930121=3, srv1599497826=4} racks are {rack=0} 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,854 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv671357767=9, srv1131626037=1, srv1981717255=8, srv1035499714=0, srv1754824089=5, srv184911725=7, srv1785754483=6, srv1150981628=2, srv133930121=3, srv1599497826=4} racks are {rack=0} 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv671357767=9, srv1131626037=1, srv1981717255=8, srv1035499714=0, srv1754824089=5, srv184911725=7, srv1785754483=6, srv1150981628=2, srv133930121=3, srv1599497826=4} racks are {rack=0} 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv671357767=9, srv1131626037=1, srv1981717255=8, srv1035499714=0, srv1754824089=5, srv184911725=7, srv1785754483=6, srv1150981628=2, srv133930121=3, srv1599497826=4} racks are {rack=0} 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv671357767=9, srv1131626037=1, srv1981717255=8, srv1035499714=0, srv1754824089=5, srv184911725=7, srv1785754483=6, srv1150981628=2, srv133930121=3, srv1599497826=4} racks are {rack=0} 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1961897875=4, srv349275167=5, srv571616640=8, srv600473405=9, srv1488681744=0, srv1577271677=1, srv409496291=6, srv495108303=7, srv1773525971=2, srv1784784721=3} racks are {rack=0} 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,856 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1961897875=4, srv349275167=5, srv571616640=8, srv600473405=9, srv1488681744=0, srv1577271677=1, srv409496291=6, srv495108303=7, srv1773525971=2, srv1784784721=3} racks are {rack=0} 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1961897875=4, srv349275167=5, srv571616640=8, srv600473405=9, srv1488681744=0, srv1577271677=1, srv409496291=6, srv495108303=7, srv1773525971=2, srv1784784721=3} racks are {rack=0} 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1961897875=4, srv349275167=5, srv571616640=8, srv600473405=9, srv1488681744=0, srv1577271677=1, srv409496291=6, srv495108303=7, srv1773525971=2, srv1784784721=3} racks are {rack=0} 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
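[editor's sketch] The repeated "Hosts are {...} racks are {rack=0}", "server N is on host N" and "server N is on rack 0" entries above show the balancer indexing every region server by host and rack before costing a plan: ten single-server hosts, all on one rack, which is why each pass ends with "Number of tables=1, number of hosts=10, number of racks=1". The Java sketch below is a hypothetical illustration of that indexing only; the class and field names are assumptions, not HBase's actual BalancerClusterState.

// Hypothetical sketch of server -> host/rack indexing, loosely modeled on the
// BalancerClusterState log lines above. Names and structure are assumptions,
// not the real HBase implementation.
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ClusterIndexSketch {
  public static void main(String[] args) {
    // Ten region servers, each on its own host, all on one rack - the shape
    // reported by the log ("number of hosts=10, number of racks=1").
    List<String> servers = List.of(
        "srv1961897875", "srv349275167", "srv571616640", "srv600473405",
        "srv1488681744", "srv1577271677", "srv409496291", "srv495108303",
        "srv1773525971", "srv1784784721");

    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    Map<String, Integer> rackIndex = new LinkedHashMap<>();
    List<Integer> serverToHost = new ArrayList<>();
    List<Integer> serverToRack = new ArrayList<>();

    for (String server : servers) {
      // Each server name doubles as its host here; every host sits on "rack".
      int host = hostIndex.computeIfAbsent(server, h -> hostIndex.size());
      int rack = rackIndex.computeIfAbsent("rack", r -> rackIndex.size());
      serverToHost.add(host);
      serverToRack.add(rack);
      System.out.printf("server %d is on host %d, rack %d%n",
          serverToHost.size() - 1, host, rack);
    }
    System.out.printf("number of hosts=%d, number of racks=%d%n",
        hostIndex.size(), rackIndex.size());
  }
}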
2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1961897875=4, srv349275167=5, srv571616640=8, srv600473405=9, srv1488681744=0, srv1577271677=1, srv409496291=6, srv495108303=7, srv1773525971=2, srv1784784721=3} racks are {rack=0} 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1961897875=4, srv349275167=5, srv571616640=8, srv600473405=9, srv1488681744=0, srv1577271677=1, srv409496291=6, srv495108303=7, srv1773525971=2, srv1784784721=3} racks are {rack=0} 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,859 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:52,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1961897875=4, srv349275167=5, srv571616640=8, srv600473405=9, srv1488681744=0, srv1577271677=1, srv409496291=6, srv495108303=7, srv1773525971=2, srv1784784721=3} racks are {rack=0} 2024-11-10T12:15:52,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:52,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1961897875=4, srv349275167=5, srv571616640=8, srv600473405=9, srv1488681744=0, srv1577271677=1, srv409496291=6, srv495108303=7, srv1773525971=2, srv1784784721=3} racks are {rack=0} 2024-11-10T12:15:52,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,860 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:52,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1483614416=3, srv1003501682=0, srv459440189=7, srv1761520687=4, srv105241624=1, srv951799536=8, srv1962095225=6, srv1256224991=2, srv978456669=9, srv1954980410=5} racks are {rack=0} 2024-11-10T12:15:52,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1483614416=3, srv1003501682=0, srv459440189=7, srv1761520687=4, srv105241624=1, srv951799536=8, srv1962095225=6, srv1256224991=2, srv978456669=9, srv1954980410=5} racks are {rack=0} 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
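[editor's sketch] Each "skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)" entry is the balancer comparing a multiplier-weighted average of the per-cost-function imbalances against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run). The sketch below only reproduces that arithmetic with the multipliers printed in the functionCost= lines; it is an illustration under those assumptions, not the real StochasticLoadBalancer code.

// Rough sketch of the "needs balance" check described in the log: a
// multiplier-weighted average of per-cost-function imbalances compared
// against a threshold. Multipliers are taken from the functionCost= lines
// above; the method itself is illustrative, not HBase's implementation.
public class NeedsBalanceSketch {
  static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
    double weighted = 0.0;
    double totalMultiplier = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      totalMultiplier += multipliers[i];
    }
    return totalMultiplier == 0.0 ? 0.0 : weighted / totalMultiplier;
  }

  public static void main(String[] args) {
    // Multipliers as printed in the log; every imbalance is 0.0 in this run.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};
    double threshold = 1.0; // minCostNeedBalance in this run

    double avg = weightedAverageImbalance(multipliers, imbalances);
    if (avg <= threshold) {
      System.out.printf(
          "skipping load balancing: weighted average imbalance=%.1f <= threshold(%.1f)%n",
          avg, threshold);
    } else {
      System.out.println("generating a balance plan");
    }
  }
}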
2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1483614416=3, srv1003501682=0, srv459440189=7, srv1761520687=4, srv105241624=1, srv951799536=8, srv1962095225=6, srv1256224991=2, srv978456669=9, srv1954980410=5} racks are {rack=0} 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,861 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1483614416=3, srv1003501682=0, srv459440189=7, srv1761520687=4, srv105241624=1, srv951799536=8, srv1962095225=6, srv1256224991=2, srv978456669=9, srv1954980410=5} racks are {rack=0} 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1483614416=3, srv1003501682=0, srv459440189=7, srv1761520687=4, srv105241624=1, srv951799536=8, srv1962095225=6, srv1256224991=2, srv978456669=9, srv1954980410=5} racks are {rack=0} 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,862 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:52,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1483614416=3, srv1003501682=0, srv459440189=7, srv1761520687=4, srv105241624=1, srv951799536=8, srv1962095225=6, srv1256224991=2, srv978456669=9, srv1954980410=5} racks are {rack=0} 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1483614416=3, srv1003501682=0, srv459440189=7, srv1761520687=4, srv105241624=1, srv951799536=8, srv1962095225=6, srv1256224991=2, srv978456669=9, srv1954980410=5} racks are {rack=0} 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
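[editor's sketch] The same log message also names the two knobs for more aggressive balancing: lower hbase.master.balancer.stochastic.minCostNeedBalance, or raise the relative multiplier of a specific cost function. A minimal configuration sketch is below; the property minCostNeedBalance is quoted from the log, while the regionCountCost key is assumed to be the multiplier behind RegionCountSkewCostFunction (its 500.0 default matches the log), and the concrete values are arbitrary examples.

// Minimal sketch of the tuning the log message suggests. The
// minCostNeedBalance property is quoted from the log above; the
// regionCountCost key and the chosen values are assumptions/examples.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Option 1: lower the threshold so small weighted imbalances still
    // trigger a balance plan (it is 1.0 in the run above).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Option 2: raise the relative multiplier of a specific cost function,
    // e.g. the region-count skew cost (multiplier=500.0 in the log).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);

    System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}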
2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1483614416=3, srv1003501682=0, srv459440189=7, srv1761520687=4, srv105241624=1, srv951799536=8, srv1962095225=6, srv1256224991=2, srv978456669=9, srv1954980410=5} racks are {rack=0} 2024-11-10T12:15:52,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,864 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1483614416=3, srv1003501682=0, srv459440189=7, srv1761520687=4, srv105241624=1, srv951799536=8, srv1962095225=6, srv1256224991=2, srv978456669=9, srv1954980410=5} racks are {rack=0} 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:52,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv771621905=9, srv1837556204=6, srv1458319630=3, srv151498554=4, srv1365171234=2, srv1096973579=0, srv1694694506=5, srv2129332804=8, srv1167644862=1, srv1860658111=7} racks are {rack=0} 2024-11-10T12:15:52,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,865 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:52,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv771621905=9, srv1837556204=6, srv1458319630=3, srv151498554=4, srv1365171234=2, srv1096973579=0, srv1694694506=5, srv2129332804=8, srv1167644862=1, srv1860658111=7} racks are {rack=0} 2024-11-10T12:15:52,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv771621905=9, srv1837556204=6, srv1458319630=3, srv151498554=4, srv1365171234=2, srv1096973579=0, srv1694694506=5, srv2129332804=8, srv1167644862=1, srv1860658111=7} racks are {rack=0} 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
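[Editor's note] The StochasticLoadBalancer(421) records repeated throughout this run all report the same decision: the weighted average imbalance (0.0) does not exceed the minCostNeedBalance threshold (1.0), so no balance plan is generated for that table. Below is a minimal, self-contained Java sketch of that decision rule using the multipliers printed in the records above (RegionCountSkew=500, Move=7, RackLocality=15, TableSkew=35, Read/Write/MemStore/StoreFile=5 each). The class and method names are illustrative only and are not HBase's actual internals; it is a sketch of the rule as stated in the log message, not of the real implementation.

```java
// Illustrative sketch (not HBase source): reproduces the skip decision logged by
// StochasticLoadBalancer(421) -- balance only when the weighted average imbalance
// exceeds hbase.master.balancer.stochastic.minCostNeedBalance (threshold 1.0 here).
import java.util.LinkedHashMap;
import java.util.Map;

public class BalanceDecisionSketch {

    /** Weighted average of per-cost-function imbalances; weights are the multipliers. */
    static double weightedAverageImbalance(Map<String, double[]> functions) {
        double weightedSum = 0.0, weightTotal = 0.0;
        for (double[] multiplierAndImbalance : functions.values()) {
            weightedSum += multiplierAndImbalance[0] * multiplierAndImbalance[1];
            weightTotal += multiplierAndImbalance[0];
        }
        return weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;
    }

    public static void main(String[] args) {
        // Multipliers and imbalances exactly as printed in the functionCost list above.
        Map<String, double[]> functionCost = new LinkedHashMap<>();
        functionCost.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        functionCost.put("MoveCostFunction",            new double[] {7.0,   0.0});
        functionCost.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        functionCost.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        functionCost.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        functionCost.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        functionCost.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        functionCost.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

        double minCostNeedBalance = 1.0; // threshold(1.0) from the log
        double imbalance = weightedAverageImbalance(functionCost);

        if (imbalance <= minCostNeedBalance) {
            System.out.printf("skipping load balancing: imbalance=%.1f <= threshold(%.1f)%n",
                imbalance, minCostNeedBalance);
        } else {
            System.out.println("would generate a balance plan");
        }
    }
}
```

With every imbalance at 0.0 the weighted average is 0.0, which is why each per-table pass in this log ends with "skipping load balancing" rather than a balance plan.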
2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv771621905=9, srv1837556204=6, srv1458319630=3, srv151498554=4, srv1365171234=2, srv1096973579=0, srv1694694506=5, srv2129332804=8, srv1167644862=1, srv1860658111=7} racks are {rack=0} 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,867 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv771621905=9, srv1837556204=6, srv1458319630=3, srv151498554=4, srv1365171234=2, srv1096973579=0, srv1694694506=5, srv2129332804=8, srv1167644862=1, srv1860658111=7} racks are {rack=0} 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv771621905=9, srv1837556204=6, srv1458319630=3, srv151498554=4, srv1365171234=2, srv1096973579=0, srv1694694506=5, srv2129332804=8, srv1167644862=1, srv1860658111=7} racks are {rack=0} 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,867 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv771621905=9, srv1837556204=6, srv1458319630=3, srv151498554=4, srv1365171234=2, srv1096973579=0, srv1694694506=5, srv2129332804=8, srv1167644862=1, srv1860658111=7} racks are {rack=0} 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv771621905=9, srv1837556204=6, srv1458319630=3, srv151498554=4, srv1365171234=2, srv1096973579=0, srv1694694506=5, srv2129332804=8, srv1167644862=1, srv1860658111=7} racks are {rack=0} 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:52,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv771621905=9, srv1837556204=6, srv1458319630=3, srv151498554=4, srv1365171234=2, srv1096973579=0, srv1694694506=5, srv2129332804=8, srv1167644862=1, srv1860658111=7} racks are {rack=0} 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,869 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv771621905=9, srv1837556204=6, srv1458319630=3, srv151498554=4, srv1365171234=2, srv1096973579=0, srv1694694506=5, srv2129332804=8, srv1167644862=1, srv1860658111=7} racks are {rack=0} 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-10T12:15:52,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,871 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,873 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,874 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
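Editor's note: the "Hosts are {...} racks are {rack=0}" and "server N is on host N / server N is on rack 0" entries above show the balancer flattening the cluster into per-server host and rack indices before it costs anything. The sketch below is an illustration only, not the real BalancerClusterState code; it rebuilds those two lookup arrays for the ten single-server hosts on one rack used in this test (server names copied from the log) and prints the same mapping the log reports.

// Illustrative sketch only: mimics the server -> host -> rack indexing that the
// BalancerClusterState entries above describe. Names and structure are hypothetical.
import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterIndexSketch {
  public static void main(String[] args) {
    // Ten servers, each its own host, all on a single rack -- as in this test run.
    String[] servers = {"srv1515240165", "srv1890935239", "srv1900927667", "srv1962334942",
                        "srv2600608", "srv270093782", "srv435907394", "srv507220146",
                        "srv554178323", "srv734735337"};
    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    Map<String, Integer> rackIndex = new LinkedHashMap<>();
    int[] serverToHost = new int[servers.length];
    int[] serverToRack = new int[servers.length];
    for (int s = 0; s < servers.length; s++) {
      // First time a host/rack is seen it gets the next index; here every server is its own host.
      serverToHost[s] = hostIndex.computeIfAbsent(servers[s], h -> hostIndex.size());
      serverToRack[s] = rackIndex.computeIfAbsent("rack", r -> rackIndex.size());
      System.out.println("server " + s + " is on host " + serverToHost[s]
          + " and rack " + serverToRack[s]);
    }
    System.out.println("number of hosts=" + hostIndex.size() + ", number of racks=" + rackIndex.size());
  }
}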
2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,875 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,876 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
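Editor's note: each "skipping load balancing" decision in this log (including the one for table92 that follows) compares a weighted average of the per-cost-function imbalance values against the minCostNeedBalance threshold of 1.0. The sketch below is only an assumption about that arithmetic, namely a multiplier-weighted mean of the imbalance values listed in functionCost, with functions reported as "(not needed)" left out; with every imbalance at 0.0 the result is 0.0, which is <= 1.0, so no balance plan is produced.

// Hedged sketch: assumes the logged "weighted average imbalance" is the
// multiplier-weighted mean of the per-function imbalance values.
public class ImbalanceSketch {
  static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
    double weighted = 0.0, totalWeight = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      totalWeight += multipliers[i];
    }
    return totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
  }

  public static void main(String[] args) {
    // Multipliers as logged: RegionCountSkew=500, Move=7, RackLocality=15, TableSkew=35,
    // ReadRequest=5, WriteRequest=5, MemStoreSize=5, StoreFile=5.
    double[] multipliers = {500, 7, 15, 35, 5, 5, 5, 5};
    double[] imbalances  = {0, 0, 0, 0, 0, 0, 0, 0};   // all 0.0 in this run
    double avg = weightedAverageImbalance(multipliers, imbalances);
    double minCostNeedBalance = 1.0;                    // threshold(1.0) in the log
    System.out.println("weighted average imbalance=" + avg
        + ", balance needed=" + (avg > minCostNeedBalance));
  }
}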
2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,877 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,878 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-10T12:15:52,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
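Editor's note: the recurring "Slop is less than zero, not checking for sloppiness" entry refers to the region-count slop check BaseLoadBalancer can run before costing; this test evidently runs with a negative slop (normally configured via hbase.regions.slop), so that check is bypassed. The sketch below is an assumption-level illustration of what such a check amounts to, flagging any server whose region count falls outside average*(1 +/- slop); it is not the actual HBase implementation.

// Illustration only (assumed form of the sloppiness check, skipped in this run because slop < 0).
public class SlopSketch {
  static boolean sloppy(int[] regionsPerServer, float slop) {
    if (slop < 0) return false;                  // matches the log: negative slop => check skipped
    double avg = 0;
    for (int r : regionsPerServer) avg += r;
    avg /= regionsPerServer.length;
    double floor = Math.floor(avg * (1 - slop));
    double ceil  = Math.ceil(avg * (1 + slop));
    for (int r : regionsPerServer) {
      if (r < floor || r > ceil) return true;    // some server is too far from the mean load
    }
    return false;
  }

  public static void main(String[] args) {
    System.out.println(sloppy(new int[]{12, 10, 11, 9}, 0.2f)); // false: within the slop band
    System.out.println(sloppy(new int[]{30, 2, 3, 1}, 0.2f));   // true: clearly skewed
  }
}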
2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,880 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
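The skip verdicts repeated throughout this run (for example the table99 entry just above) all hinge on one knob: hbase.master.balancer.stochastic.minCostNeedBalance, reported here as threshold(1.0). Below is a minimal sketch, assuming the standard Hadoop Configuration API, of how that threshold could be lowered before the balancer is constructed; the property name is taken from the log message itself, while the class name, setup and the 0.05f value are purely illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerThresholdSketch {
      public static void main(String[] args) {
        // Start from the default HBase configuration.
        Configuration conf = HBaseConfiguration.create();

        // The log message suggests lowering this from the 1.0 used in this run
        // to make the StochasticLoadBalancer act on smaller imbalances.
        // 0.05f is an illustrative value, not a recommendation from the log.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        System.out.println("minCostNeedBalance = "
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
      }
    }

With every cost function reporting imbalance=0.0, though, no positive threshold would change the outcome for this particular run.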
2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-10T12:15:52,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,882 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is 
on rack 0 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,883 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
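As the functionCost= listings above show, every verdict in this section reduces to the same arithmetic: a weighted average of the per-function imbalances (weighted by the multipliers printed) compared against the 1.0 threshold. The following self-contained illustration uses the exact multipliers and imbalance values from the log; it is a simplified reading of the message, not a reproduction of HBase's internal cost computation, and the "(not needed)" functions are simply left out.

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class WeightedImbalanceSketch {
      public static void main(String[] args) {
        // Multipliers and imbalance values copied from the functionCost= listing
        // in the log above; each entry is {multiplier, imbalance}.
        Map<String, double[]> functions = new LinkedHashMap<>();
        functions.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        functions.put("MoveCostFunction",            new double[] {7.0,   0.0});
        functions.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        functions.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        functions.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        functions.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        functions.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        functions.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

        double weightedSum = 0.0, multiplierSum = 0.0;
        for (double[] f : functions.values()) {
          weightedSum += f[0] * f[1];   // multiplier * imbalance
          multiplierSum += f[0];
        }
        double weightedAverage = multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;

        double threshold = 1.0;  // value reported in the log as threshold(1.0)
        System.out.printf("weighted average imbalance = %.4f, threshold = %.1f%n",
            weightedAverage, threshold);
        System.out.println(weightedAverage <= threshold
            ? "skip balancing (matches the log verdict)"
            : "generate a balance plan");
      }
    }

Since every imbalance is 0.0, the weighted average is 0.0 <= 1.0 and the balancer skips plan generation, which is exactly what the table113 entry above reports.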
2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,884 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,885 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,886 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-10T12:15:52,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,887 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,889 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,890 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,891 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,892 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,893 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,894 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
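
The "skipping load balancing" entries repeated above all reflect the same decision: the balancer weighs each cost function's imbalance by its multiplier, forms a weighted average, and skips the table when that average does not exceed the minCostNeedBalance threshold (1.0 in this run, and every imbalance here is 0.0). The following is a minimal sketch of that check, assuming invented names (ImbalanceCheck, needsBalancing) that do not correspond to the real StochasticLoadBalancer code; it only mirrors the arithmetic stated in the log message.

    // Hypothetical reconstruction of the weighted-average imbalance check described
    // in the log entries above. Not the actual HBase implementation.
    public final class ImbalanceCheck {
        /** True when the weighted average imbalance exceeds the configured threshold. */
        static boolean needsBalancing(double[] multipliers, double[] imbalances, double minCostNeedBalance) {
            double weightedSum = 0.0;
            double totalWeight = 0.0;
            for (int i = 0; i < multipliers.length; i++) {
                weightedSum += multipliers[i] * imbalances[i];
                totalWeight += multipliers[i];
            }
            double weightedAverage = totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
            return weightedAverage > minCostNeedBalance;
        }

        public static void main(String[] args) {
            // Multipliers and imbalances taken from the functionCost= entries above.
            double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
            double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
            // Prints false: weighted average imbalance 0.0 <= threshold 1.0, so the plan is skipped.
            System.out.println(needsBalancing(multipliers, imbalances, 1.0));
        }
    }
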
2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,896 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
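
Each skip message points at the same tuning knob, hbase.master.balancer.stochastic.minCostNeedBalance, which defaults to 1.0 in this run. Below is a minimal sketch, assuming the standard Hadoop/HBase Configuration API, of lowering that property programmatically; the value 0.05 is purely illustrative, and in practice the property would normally be set in hbase-site.xml on the master. Raising the relative multiplier of a specific cost function, as the message itself suggests, is the other option.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch only: builds a Configuration with a lower minCostNeedBalance so that
    // smaller weighted imbalances still trigger a balance plan.
    public final class BalancerTuningSketch {
        public static Configuration moreAggressiveBalancerConf() {
            // Loads hbase-default.xml and hbase-site.xml from the classpath.
            Configuration conf = HBaseConfiguration.create();
            // 0.05 is an example value, not a recommendation taken from the log above.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            return conf;
        }
    }
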
2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,898 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,899 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
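
The BalancerClusterState lines repeated for every table describe one fixed topology: ten servers, each on its own host, all on a single rack ("Number of tables=1, number of hosts=10, number of racks=1"). The short sketch below restates that mapping as plain arrays to make the repeated enumeration easier to follow; the array names are invented and are not the balancer's internal fields.

    // Illustrative only: the server/host/rack layout enumerated in the log above.
    public final class ClusterTopologySketch {
        public static void main(String[] args) {
            int servers = 10;
            int[] serverToHost = new int[servers]; // "server i is on host i"
            int[] serverToRack = new int[servers]; // "server i is on rack 0"
            for (int i = 0; i < servers; i++) {
                serverToHost[i] = i;
                serverToRack[i] = 0;
            }
            System.out.println("hosts=" + servers + ", racks=1");
        }
    }
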
2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,900 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
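
The skip decisions logged above hinge on the check that StochasticLoadBalancer(421) reports: each cost function contributes its imbalance weighted by its multiplier, and balancing is skipped when the weighted average does not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run). The following is a minimal illustrative sketch of that check using the multipliers printed in the functionCost= lines; the class and method names are assumptions for illustration, not HBase's actual implementation.

// Illustrative sketch of the "weighted average imbalance <= threshold" check
// described by the StochasticLoadBalancer log lines above. Names are assumed.
import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceCheck {

    /** Multiplier-weighted average of the per-cost-function imbalances. */
    static double weightedAverageImbalance(Map<String, double[]> costs) {
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] multiplierAndImbalance : costs.values()) {
            weightedSum += multiplierAndImbalance[0] * multiplierAndImbalance[1];
            multiplierSum += multiplierAndImbalance[0];
        }
        return multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
    }

    public static void main(String[] args) {
        // {multiplier, imbalance} pairs as printed in the functionCost= line.
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        costs.put("MoveCostFunction",            new double[] {7.0,   0.0});
        costs.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        costs.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        costs.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        costs.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        costs.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        costs.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

        double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
        double imbalance = weightedAverageImbalance(costs);
        if (imbalance <= minCostNeedBalance) {
            System.out.println("skipping load balancing: " + imbalance + " <= " + minCostNeedBalance);
        }
    }
}

With every imbalance at 0.0, the weighted average is 0.0, which is why every table in this test run is skipped.
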
2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,903 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,903 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-10T12:15:52,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
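
As the log message itself suggests, the two knobs for more aggressive balancing are lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising the relative multiplier of a specific cost function. A hedged sketch of setting these on an HBase Configuration follows; the minCostNeedBalance key is taken verbatim from the log, while the multiplier key shown is an assumption used only to illustrate the second option.

// Minimal sketch: tuning the stochastic balancer, assuming these keys are
// applied in hbase-site.xml or on the master's Configuration before balancing.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Key taken from the log message above; lowering it below the reported
        // weighted-average imbalance allows a balance plan to be generated.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Assumed multiplier key, shown only to illustrate raising the relative
        // weight of one cost function (RegionCountSkewCostFunction here).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}
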
2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,905 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
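
The BalancerClusterState(202/303/314) lines above enumerate how each server index maps to a host index and a rack index; with ten distinct hosts and a single rack, every server lands on its own host and on rack 0, which is what the repeated "server N is on host N" / "is on rack 0" entries record. A small sketch of that bookkeeping is shown below, under assumed names; it is not the HBase class, only an illustration of the index assignment the log describes.

// Illustrative sketch of the server -> host / rack index bookkeeping reported
// by the BalancerClusterState log lines. Field and class names are assumed.
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ClusterTopologySketch {
    final int[] serverToHost;
    final int[] serverToRack;

    ClusterTopologySketch(List<String> servers, Map<String, String> hostOfServer,
                          Map<String, String> rackOfHost) {
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        Map<String, Integer> rackIndex = new LinkedHashMap<>();
        serverToHost = new int[servers.size()];
        serverToRack = new int[servers.size()];
        for (int i = 0; i < servers.size(); i++) {
            String host = hostOfServer.get(servers.get(i));
            String rack = rackOfHost.getOrDefault(host, "rack"); // single default rack
            Integer h = hostIndex.get(host);
            if (h == null) { h = hostIndex.size(); hostIndex.put(host, h); }
            Integer r = rackIndex.get(rack);
            if (r == null) { r = rackIndex.size(); rackIndex.put(rack, r); }
            serverToHost[i] = h;
            serverToRack[i] = r;
            System.out.println("server " + i + " is on host " + h + ", rack " + r);
        }
    }

    public static void main(String[] args) {
        List<String> servers = new ArrayList<>();
        Map<String, String> hostOf = new LinkedHashMap<>();
        for (int i = 0; i < 10; i++) {
            String srv = "srv" + i;      // placeholder names, not the srv IDs in the log
            servers.add(srv);
            hostOf.put(srv, "host" + i); // ten distinct hosts, as in the test cluster
        }
        new ClusterTopologySketch(servers, hostOf, new LinkedHashMap<>());
    }
}

Because there is only one rack, RackLocalityCostFunction and the region-replica rack checks have nothing to improve, consistent with the zero imbalances reported for every table.
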
2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,907 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,908 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,909 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,910 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-10T12:15:52,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,911 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-10T12:15:52,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,912 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-10T12:15:52,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,914 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
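
The skip message above repeats the same tuning hint for every table: nothing moves while the weighted average imbalance stays at or below hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run). Below is a minimal sketch of lowering that threshold programmatically, assuming a stock Hadoop Configuration object and assuming the hbase.master.balancer.stochastic.regionCountCost key is the one backing the RegionCountSkewCostFunction multiplier=500.0 printed in the functionCost list:

    import org.apache.hadoop.conf.Configuration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();

            // Lower the threshold named in the log so smaller imbalances still trigger a run.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

            // Or raise a cost-function multiplier instead; this key is an assumption for
            // the RegionCountSkewCostFunction multiplier shown in the functionCost line.
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

            System.out.println("minCostNeedBalance = "
                + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
        }
    }

In a real deployment the same properties would normally live in hbase-site.xml rather than being set in code.
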
2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
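
Each block also logs "Slop is less than zero, not checking for sloppiness." The sketch below only illustrates the general idea of such a pre-check, under the assumption that "sloppiness" means some server's region count drifting more than a slop fraction away from the cluster average and that a negative slop disables the check; the class and method names are hypothetical, not the BaseLoadBalancer internals.

    import java.util.Arrays;

    // Hypothetical slop-based pre-check; illustrative only.
    public class SlopCheckSketch {

        /** True when some server sits outside avg*(1 +/- slop); a negative slop skips the check. */
        static boolean isSloppy(int[] regionsPerServer, float slop) {
            if (slop < 0) {
                // Mirrors the log line: slop is less than zero, so no sloppiness check.
                return false;
            }
            double avg = Arrays.stream(regionsPerServer).average().orElse(0);
            int floor = (int) Math.floor(avg * (1 - slop));
            int ceiling = (int) Math.ceil(avg * (1 + slop));
            int min = Arrays.stream(regionsPerServer).min().orElse(0);
            int max = Arrays.stream(regionsPerServer).max().orElse(0);
            return max > ceiling || min < floor;
        }

        public static void main(String[] args) {
            int[] load = {12, 10, 11, 9, 10, 10, 11, 9, 10, 8};  // ten servers, as in this cluster
            System.out.println(isSloppy(load, 0.2f));  // false: every count is within 20% of the average
            System.out.println(isSloppy(load, -1f));   // false: check disabled, like this test run
        }
    }
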
2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,916 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,917 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
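
The "Hosts are {...} racks are {rack=0}" entries, followed by "server N is on host N" and "server N is on rack 0", show the cluster being indexed: each server name is mapped to a host index and a rack index, and with one server per host on a single rack the indices line up one-to-one. A self-contained sketch of that kind of indexing, with hypothetical names rather than the BalancerClusterState internals:

    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    // Hypothetical indexing of servers by host and rack, mirroring the log output.
    public class ClusterIndexSketch {
        public static void main(String[] args) {
            List<String> servers = List.of(
                "srv1515240165", "srv1890935239", "srv1900927667", "srv1962334942", "srv2600608",
                "srv270093782", "srv435907394", "srv507220146", "srv554178323", "srv734735337");

            Map<String, Integer> hostIndex = new LinkedHashMap<>();
            Map<String, Integer> rackIndex = new LinkedHashMap<>();
            int[] serverToHost = new int[servers.size()];
            int[] serverToRack = new int[servers.size()];

            for (int i = 0; i < servers.size(); i++) {
                String host = servers.get(i);  // one host per server in this test cluster
                String rack = "rack";          // a single rack, as printed in the log
                hostIndex.putIfAbsent(host, hostIndex.size());
                rackIndex.putIfAbsent(rack, rackIndex.size());
                serverToHost[i] = hostIndex.get(host);
                serverToRack[i] = rackIndex.get(rack);
                System.out.println("server " + i + " is on host " + serverToHost[i]
                    + " and rack " + serverToRack[i]);
            }
            // Matches the summary line: number of hosts=10, number of racks=1.
            System.out.println("hosts=" + hostIndex.size() + " racks=" + rackIndex.size());
        }
    }
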
2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-10T12:15:52,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
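
Every functionCost list pairs a multiplier with an imbalance per cost function. Assuming the "weighted average imbalance" in the skip message is simply the sum of multiplier * imbalance over the enabled functions divided by the sum of their multipliers (an assumption; the log does not spell the formula out), the arithmetic for the values printed here is trivial because every imbalance is 0.0:

    // Illustrative arithmetic only; the exact formula is an assumption based on the log wording.
    public class WeightedImbalanceSketch {
        public static void main(String[] args) {
            // (multiplier, imbalance) pairs for the cost functions the log marks as needed.
            double[][] costs = {
                {500.0, 0.0},  // RegionCountSkewCostFunction
                {7.0,   0.0},  // MoveCostFunction
                {15.0,  0.0},  // RackLocalityCostFunction
                {35.0,  0.0},  // TableSkewCostFunction
                {5.0,   0.0},  // ReadRequestCostFunction
                {5.0,   0.0},  // WriteRequestCostFunction
                {5.0,   0.0},  // MemStoreSizeCostFunction
                {5.0,   0.0},  // StoreFileCostFunction
            };
            double weightedSum = 0, totalWeight = 0;
            for (double[] c : costs) {
                weightedSum += c[0] * c[1];
                totalWeight += c[0];
            }
            double weightedAverageImbalance = weightedSum / totalWeight;
            double minCostNeedBalance = 1.0;  // threshold(1.0) from the log
            System.out.printf("weighted average imbalance=%.1f%n", weightedAverageImbalance);
            // Matches the log outcome: 0.0 <= 1.0, so this table is skipped.
            System.out.println("skip = " + (weightedAverageImbalance <= minCostNeedBalance));
        }
    }
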
2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,918 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
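
The same block recurs for table30 through table109 because the balancer runs in per-table mode: each table gets its own "Start Generate Balance plan" entry and its own skip-or-act decision. A hypothetical driver loop with that shape (the class, the Move record, and the computeWeightedImbalance helper are made up for illustration, not the StochasticLoadBalancer API):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    // Hypothetical per-table balancing loop, mirroring the shape of the log output.
    public class PerTableBalanceSketch {

        record Move(String region, String fromServer, String toServer) {}

        /** Placeholder: a real balancer derives this value from its cost functions. */
        static double computeWeightedImbalance(Map<String, Integer> regionsPerServer) {
            return 0.0;  // every table in this test run reports imbalance 0.0
        }

        static List<Move> balance(Map<String, Map<String, Integer>> tableToServerLoad,
                                  double minCostNeedBalance) {
            List<Move> plan = new ArrayList<>();
            for (var entry : tableToServerLoad.entrySet()) {
                String table = entry.getKey();
                System.out.println("Start Generate Balance plan for table: " + table);
                double imbalance = computeWeightedImbalance(entry.getValue());
                if (imbalance <= minCostNeedBalance) {
                    System.out.println("Table specific (" + table + ") - skipping, imbalance="
                        + imbalance + " <= threshold(" + minCostNeedBalance + ")");
                    continue;  // nothing to do for this table, exactly as the log shows
                }
                // ...otherwise a stochastic search over candidate region moves would run here.
            }
            return plan;
        }

        public static void main(String[] args) {
            balance(Map.of(
                "table35", Map.of("srv1515240165", 2, "srv1890935239", 2),
                "table36", Map.of("srv1515240165", 2, "srv1890935239", 2)),
                1.0);
        }
    }
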
2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,920 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-10T12:15:52,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
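The skip message above names the two knobs it suggests touching: hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run) and the per-cost-function multipliers. Below is a minimal Java sketch of how such a configuration could be assembled, assuming the stock Hadoop/HBase Configuration API; the minCostNeedBalance key is quoted verbatim from the log message, while the regionCountCost key is an assumed example of a multiplier property and should be checked against the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        static Configuration moreAggressiveBalancing() {
            Configuration conf = HBaseConfiguration.create();
            // Key quoted in the log message; lowering it lets the stochastic balancer
            // act on smaller weighted imbalances than the 1.0 threshold seen above.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            // Assumed multiplier key (RegionCountSkewCostFunction shows multiplier=500.0
            // in the functionCost dump); raising it weights region-count skew more heavily.
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
            return conf;
        }
    }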
2024-11-10T12:15:52,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,923 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,923 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
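Each pass also logs "Slop is less than zero, not checking for sloppiness." That shortcut is governed by the slop setting (commonly hbase.regions.slop; the exact key is not shown in this excerpt, so treat it as an assumption). A tiny sketch of a test-style configuration with a negative slop, which would match the behaviour logged here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    class SlopSketch {
        static Configuration negativeSlop() {
            Configuration conf = HBaseConfiguration.create();
            // A negative slop disables the "sloppy server" shortcut, so every table
            // goes through the full cost-function evaluation shown in this log.
            conf.setFloat("hbase.regions.slop", -1.0f);
            return conf;
        }
    }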
2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
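The decision line reads "weighted average imbalance=0.0 <= threshold(1.0)". Taking the functionCost dump at face value, that comparison behaves like a multiplier-weighted average of the per-function imbalances checked against minCostNeedBalance. The sketch below only mirrors the arithmetic implied by the message, not the balancer's internal code:

    class NeedsBalanceSketch {
        // Each row is {multiplier, imbalance}, as printed in the functionCost line.
        static boolean needsBalance(double[][] costs, double minCostNeedBalance) {
            double weighted = 0.0, total = 0.0;
            for (double[] c : costs) {
                weighted += c[0] * c[1];
                total += c[0];
            }
            double avg = total == 0.0 ? 0.0 : weighted / total;
            return avg > minCostNeedBalance;
        }

        public static void main(String[] args) {
            double[][] costs = {
                {500.0, 0.0}, // RegionCountSkewCostFunction
                {7.0, 0.0},   // MoveCostFunction
                {15.0, 0.0},  // RackLocalityCostFunction
                {35.0, 0.0},  // TableSkewCostFunction
                {5.0, 0.0}, {5.0, 0.0}, {5.0, 0.0}, {5.0, 0.0} // read/write/memstore/storefile
            };
            // 0.0 <= 1.0, so balancing is skipped for the table, matching the log.
            System.out.println(needsBalance(costs, 1.0)); // false
        }
    }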
2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,925 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1890935239=1, srv435907394=6, srv1515240165=0, srv554178323=8, srv1900927667=2, srv270093782=5, srv2600608=4, srv507220146=7, srv734735337=9, srv1962334942=3} racks are {rack=0} 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
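Every "Start Generate Balance plan for table: tableNN" line corresponds to the balancer being handed a per-table map of server -> regions. The harness that builds those maps is not part of this excerpt; the sketch below only illustrates the shape of such an input using public HBase client types (ServerName, TableName, RegionInfoBuilder), with an even region count per server, which is what makes every per-table imbalance come out as 0.0 above.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    class ClusterMapSketch {
        // server -> regions for one table, evenly loaded across the given servers.
        static Map<ServerName, List<RegionInfo>> evenLoad(String table, int servers, int regionsPerServer) {
            Map<ServerName, List<RegionInfo>> load = new HashMap<>();
            int key = 0;
            for (int s = 0; s < servers; s++) {
                // Hypothetical host names; the test's srvNNN names are random.
                ServerName sn = ServerName.valueOf("srv" + s + ".example.org", 16020, s);
                List<RegionInfo> regions = new ArrayList<>();
                for (int r = 0; r < regionsPerServer; r++) {
                    regions.add(RegionInfoBuilder.newBuilder(TableName.valueOf(table))
                        .setStartKey(Bytes.toBytes(key))
                        .setEndKey(Bytes.toBytes(++key))
                        .build());
                }
                load.put(sn, regions);
            }
            return load;
        }
    }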
2024-11-10T12:15:52,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-10T12:15:52,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,929 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-10T12:15:52,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-10T12:15:52,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,930 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
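
The repeated "skipping load balancing" messages above boil down to a single comparison: a weighted average of the per-cost-function imbalances against the hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0 here). Below is a minimal stand-alone Java sketch of that comparison, using the multipliers and imbalances from the functionCost line; the class and variable names are invented for illustration, and this is not the actual StochasticLoadBalancer implementation.

public class BalanceDecisionSketch {
    public static void main(String[] args) {
        // {multiplier, imbalance} pairs copied from the functionCost line above
        // (cost functions reported as "not needed" are omitted).
        double[][] costs = {
            {500.0, 0.0}, // RegionCountSkewCostFunction
            {7.0,   0.0}, // MoveCostFunction
            {15.0,  0.0}, // RackLocalityCostFunction
            {35.0,  0.0}, // TableSkewCostFunction
            {5.0,   0.0}, // ReadRequestCostFunction
            {5.0,   0.0}, // WriteRequestCostFunction
            {5.0,   0.0}, // MemStoreSizeCostFunction
            {5.0,   0.0}, // StoreFileCostFunction
        };
        double minCostNeedBalance = 1.0; // "threshold(1.0)" in the log

        double weightedSum = 0.0, multiplierSum = 0.0;
        for (double[] c : costs) {
            weightedSum   += c[0] * c[1]; // multiplier * imbalance
            multiplierSum += c[0];
        }
        double weightedAverageImbalance = weightedSum / multiplierSum;

        if (weightedAverageImbalance <= minCostNeedBalance) {
            System.out.println("skipping load balancing: weighted average imbalance="
                + weightedAverageImbalance + " <= threshold(" + minCostNeedBalance + ")");
        } else {
            System.out.println("imbalance above threshold; a balance plan would be generated");
        }
    }
}

With every imbalance at 0.0, the weighted average is 0.0, which is why each table's plan generation is skipped.
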
2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,931 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,932 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,933 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,934 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
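
The log message suggests lowering hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 for more aggressive balancing. A hedged sketch of setting that property programmatically through the public Hadoop/HBase Configuration API follows; in a real deployment the property is normally set in hbase-site.xml, and the 0.05 value is only an arbitrary example, not a recommendation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningExample {
    public static void main(String[] args) {
        // Loads hbase-default.xml / hbase-site.xml from the classpath.
        Configuration conf = HBaseConfiguration.create();

        // The log shows the default threshold of 1.0; lowering it makes the
        // balancer act on smaller weighted-average imbalances.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        System.out.println("minCostNeedBalance = "
            + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}

The alternative mentioned in the log, raising the relative multiplier of a specific cost function, works the same way: a larger multiplier increases that function's weight in the weighted average shown in the sketch above.
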
2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,935 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,936 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
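[Annotation] The StochasticLoadBalancer entries above report "weighted average imbalance=0.0 <= threshold(1.0)" and then list each cost function's (multiplier, imbalance) pair. The sketch below only illustrates that weighting arithmetic using the values printed for table91; the class and method are hypothetical and do not reproduce the balancer's internal API.

```java
// Illustrative sketch of the weighted-average imbalance implied by the log message.
// All numbers are copied from the functionCost list logged for table91 above.
public final class WeightedImbalanceSketch {

    /** Weighted average of per-cost-function imbalances, weighted by their multipliers. */
    static double weightedAverage(double[] multipliers, double[] imbalances) {
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weightedSum += multipliers[i] * imbalances[i];
            multiplierSum += multipliers[i];
        }
        return multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
    }

    public static void main(String[] args) {
        // RegionCountSkew=500, Move=7, RackLocality=15, TableSkew=35,
        // ReadRequest/WriteRequest/MemStoreSize/StoreFile=5 each; every imbalance is 0.0.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
        double avg = weightedAverage(multipliers, imbalances);
        // 0.0 <= threshold(1.0), so balancing is skipped for the table, matching the log.
        System.out.println("weighted average imbalance = " + avg);
    }
}
```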
2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,938 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,938 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-10T12:15:52,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,940 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,941 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
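[Annotation] Each skipped table above carries the same hint: lower hbase.master.balancer.stochastic.minCostNeedBalance from its 1.0 default, or raise individual cost-function multipliers, to get more aggressive balancing. A minimal sketch of lowering that threshold is shown below; only the minCostNeedBalance key is taken from the log, and in practice the value would normally be set in hbase-site.xml on the master rather than built programmatically as done here for illustration.

```java
import org.apache.hadoop.conf.Configuration;

// Minimal sketch: lowering the balance threshold referenced by the log messages above.
public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Default is 1.0 per the log ("lower ... from 1.0"); a smaller value lets the
        // StochasticLoadBalancer act on smaller weighted-average imbalances.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}
```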
2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,942 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,943 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-10T12:15:52,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
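
The StochasticLoadBalancer(421) entries repeated above all record the same decision: the weighted average imbalance (0.0) is at or below the threshold of 1.0 (hbase.master.balancer.stochastic.minCostNeedBalance), so no balance plan is generated for the table. Below is a minimal, illustrative Java sketch of that comparison, assuming the reported figure is the multiplier-weighted mean of the per-function imbalances listed after functionCost=, with functions marked "(not needed)" excluded; this is a reading of the log line, not HBase's implementation.

// Illustrative sketch (not HBase source): reproduce the "weighted average imbalance"
// comparison reported by the StochasticLoadBalancer(421) log lines above, assuming the
// figure is the multiplier-weighted mean of the per-function imbalances, with functions
// marked "(not needed)" left out of the sum.
import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceSketch {

    static double weightedAverageImbalance(Map<String, double[]> functionCost) {
        double weightedSum = 0.0;
        double totalWeight = 0.0;
        for (double[] cost : functionCost.values()) {
            double multiplier = cost[0];
            double imbalance = cost[1];
            weightedSum += multiplier * imbalance;
            totalWeight += multiplier;
        }
        return totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
    }

    public static void main(String[] args) {
        // Multipliers and imbalances copied from the functionCost= dump above
        // (every table in this run reports imbalance=0.0 for every function).
        Map<String, double[]> functionCost = new LinkedHashMap<>();
        functionCost.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        functionCost.put("MoveCostFunction",            new double[] {7.0,   0.0});
        functionCost.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        functionCost.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        functionCost.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        functionCost.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        functionCost.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        functionCost.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

        double avg = weightedAverageImbalance(functionCost);
        double minCostNeedBalance = 1.0; // threshold(1.0) in the log lines above
        System.out.printf("weighted average imbalance=%.1f <= threshold(%.1f) -> skip balancing%n",
                avg, minCostNeedBalance);
    }
}

With every imbalance at 0.0 the weighted average is 0.0, which is below the 1.0 threshold, matching the "skipping load balancing" outcome logged for each table in this run.
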
2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,944 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,945 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
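
Each skip message also names the two tuning knobs: lower hbase.master.balancer.stochastic.minCostNeedBalance (set to 1.0 in this run) or raise the multiplier of the cost function you care about. A hedged sketch of setting such overrides programmatically follows; the minCostNeedBalance key is quoted from the log itself, while the regionCountCost key is an assumption used only for illustration and would normally be set, and verified, in hbase-site.xml for the HBase version in use.

// Hedged sketch: apply the tuning the log message suggests.
// "hbase.master.balancer.stochastic.minCostNeedBalance" is quoted from the log lines above;
// "hbase.master.balancer.stochastic.regionCountCost" is an assumed key for the
// RegionCountSkewCostFunction multiplier (multiplier=500.0 above) and should be checked
// against the HBase version in use.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {

    static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Let the balancer act on much smaller imbalances than the 1.0 threshold in this run.
        conf.set("hbase.master.balancer.stochastic.minCostNeedBalance", "0.05");
        // Assumed key: weight region-count skew even more heavily than the logged 500.0.
        conf.set("hbase.master.balancer.stochastic.regionCountCost", "1000");
        return conf;
    }
}
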
2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,947 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-10T12:15:52,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
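
The BalancerClusterState(202/303/314) lines repeated for every table describe one fixed topology: ten servers, each mapped to its own host index, all on a single rack. The short, purely illustrative sketch below (not HBase code) rebuilds that mapping from the "Hosts are {...} racks are {rack=0}" dump above.

// Purely illustrative (not HBase code): mirrors the topology the BalancerClusterState
// DEBUG/INFO lines report for this test -- 10 servers, each on its own host, all on rack 0.
import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterTopologySketch {

    public static void main(String[] args) {
        // Host indices copied from the "Hosts are {...}" dump above, ordered by index.
        String[] serversByHostIndex = {
            "srv1008693140", "srv1495296035", "srv1733953274", "srv1785215339", "srv2019984325",
            "srv2083387801", "srv2131586981", "srv36409843", "srv659753029", "srv734279475"
        };
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        int rack = 0; // racks are {rack=0}: a single rack holds every host
        for (int i = 0; i < serversByHostIndex.length; i++) {
            hostIndex.put(serversByHostIndex[i], i);
            System.out.printf("server %d is on host %d and rack %d%n", i, i, rack);
        }
        System.out.printf("Number of tables=1, number of hosts=%d, number of racks=%d%n",
                hostIndex.size(), 1);
    }
}
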
2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,949 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-10T12:15:52,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-10T12:15:52,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
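Editor's note: the BalancerClusterState records above repeat the same topology for every table: ten servers, each on its own host, all in a single rack. Below is a minimal sketch of the index arrays those records describe, assuming nothing beyond the logged host map and rack set; it is illustrative Java only, not the actual org.apache.hadoop.hbase.master.balancer.BalancerClusterState implementation.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch only. It reproduces the mapping the records above
// describe: "server i is on host i" and "server i is on rack 0".
public class ClusterTopologySketch {
    public static void main(String[] args) {
        // Host map exactly as logged by BalancerClusterState(202); values are host indices.
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        hostIndex.put("srv1008693140", 0);
        hostIndex.put("srv1495296035", 1);
        hostIndex.put("srv1733953274", 2);
        hostIndex.put("srv1785215339", 3);
        hostIndex.put("srv2019984325", 4);
        hostIndex.put("srv2083387801", 5);
        hostIndex.put("srv2131586981", 6);
        hostIndex.put("srv36409843", 7);
        hostIndex.put("srv659753029", 8);
        hostIndex.put("srv734279475", 9);

        int numServers = hostIndex.size();
        int[] serverToHost = new int[numServers];
        int[] serverToRack = new int[numServers];

        // One server per host and a single rack ("racks are {rack=0}"), so every
        // server maps to its own host index and to rack 0.
        for (int server = 0; server < numServers; server++) {
            serverToHost[server] = server; // "server i is on host i"
            serverToRack[server] = 0;      // "server i is on rack 0"
        }

        System.out.println("Number of tables=1, number of hosts=" + numServers
            + ", number of racks=1");
    }
}
```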
2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,951 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table141 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table142 2024-11-10T12:15:52,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,952 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table143 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table144 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
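Editor's note: each skip decision above pairs a per-function multiplier with an imbalance of 0.0. The sketch below illustrates the weighted-average arithmetic implied by "weighted average imbalance=0.0 <= threshold(1.0)", using only the multipliers from the logged functionCost breakdown (functions marked "(not needed)" omitted). It is an illustration of the check, not the StochasticLoadBalancer's internal code.

```java
// Sketch of the skip check suggested by the log line
// "weighted average imbalance=0.0 <= threshold(1.0)".
// Multipliers and imbalances are copied from the functionCost breakdown above.
public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};
        double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance

        double weightedSum = 0.0;
        double weightTotal = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weightedSum += multipliers[i] * imbalances[i];
            weightTotal += multipliers[i];
        }
        double weightedAverageImbalance = weightedSum / weightTotal; // 0.0 here

        if (weightedAverageImbalance <= minCostNeedBalance) {
            System.out.println("skipping load balancing because weighted average imbalance="
                + weightedAverageImbalance + " <= threshold(" + minCostNeedBalance + ")");
        }
    }
}
```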
2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table145 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,953 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table146 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table147 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,954 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table148 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table149 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,955 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,956 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,958 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,960 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,961 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,962 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table150 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,963 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table151 2024-11-10T12:15:52,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,964 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-10T12:15:52,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,965 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,966 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,967 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,969 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-10T12:15:52,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
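The balancer summaries in this stream report a "weighted average imbalance" that is compared against threshold(1.0). As a rough illustration only, assuming that figure is the multiplier-weighted mean of the per-function imbalance values printed in the functionCost listing (with entries marked "(not needed)" excluded), the following Java sketch reproduces the skip decision; the class and variable names are hypothetical and this is not the StochasticLoadBalancer source.

    public class WeightedImbalanceSketch {
        public static void main(String[] args) {
            // Multipliers and imbalances copied from the functionCost listing above;
            // cost functions reported as "(not needed)" are omitted.
            double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
            double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};

            double weightedSum = 0.0;
            double totalWeight = 0.0;
            for (int i = 0; i < multipliers.length; i++) {
                weightedSum += multipliers[i] * imbalances[i];
                totalWeight += multipliers[i];
            }
            double weightedAverageImbalance =
                totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;

            double minCostNeedBalance = 1.0; // the threshold(1.0) printed in the log
            boolean skip = weightedAverageImbalance <= minCostNeedBalance;
            // Prints 0.0 and skip=true, consistent with "skipping load balancing
            // because weighted average imbalance=0.0 <= threshold(1.0)".
            System.out.println(weightedAverageImbalance + " skip=" + skip);
        }
    }
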
2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,971 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
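The same summary lines also spell out the two tuning knobs: lower hbase.master.balancer.stochastic.minCostNeedBalance below 1.0, or raise the multiplier of the cost function that should dominate. Below is a minimal sketch of setting such overrides programmatically through org.apache.hadoop.conf.Configuration; the values are placeholders, and the multiplier property name (hbase.master.balancer.stochastic.regionCountCost) is an assumption not confirmed by this log. In a real deployment the same keys would normally be placed in hbase-site.xml on the master.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();

            // Lower the skip threshold (reported as threshold(1.0) in the log above).
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

            // Assumed property name: give RegionCountSkewCostFunction
            // (multiplier=500.0 in the functionCost listing) even more weight.
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);

            System.out.println("minCostNeedBalance = "
                + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
        }
    }
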
2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,973 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table140 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,974 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
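Each block above also logs "Slop is less than zero, not checking for sloppiness." Slop in HBase balancers is generally the tolerated fractional deviation from the average region count per server, and a negative value, as configured for this test, disables the check entirely. The sketch below shows what such a check typically looks like under that assumption; the names are made up for illustration and this is not the BaseLoadBalancer implementation.

    public class SlopCheckSketch {
        // A cluster is treated as "sloppy" when some server carries more than
        // ceil(avg * (1 + slop)) regions or fewer than floor(avg * (1 - slop)).
        static boolean isSloppy(int[] regionsPerServer, float slop) {
            if (slop < 0) {
                return false; // check disabled, matching the log message above
            }
            double avg = 0;
            for (int n : regionsPerServer) {
                avg += n;
            }
            avg /= regionsPerServer.length;
            long floor = (long) Math.floor(avg * (1 - slop));
            long ceiling = (long) Math.ceil(avg * (1 + slop));
            for (int n : regionsPerServer) {
                if (n < floor || n > ceiling) {
                    return true;
                }
            }
            return false;
        }

        public static void main(String[] args) {
            // 10 servers, as in the host/rack mapping entries above.
            int[] regions = {10, 10, 10, 10, 10, 10, 10, 10, 10, 10};
            System.out.println("sloppy with slop=0.2: " + isSloppy(regions, 0.2f));
            System.out.println("sloppy with slop=-1:  " + isSloppy(regions, -1f));
        }
    }
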
2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,975 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,977 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,978 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table152 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table153 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,980 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table154 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,982 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,984 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,985 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,986 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,987 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,988 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-10T12:15:52,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,989 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
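The StochasticLoadBalancer(421) entries above report a "weighted average imbalance" next to a list of cost functions, each with a multiplier and an imbalance, and skip balancing because that average does not exceed threshold(1.0). The sketch below is illustrative only, not HBase's actual classes: it models the decision the message describes under the assumption that the weighted average is sum(multiplier_i * imbalance_i) / sum(multiplier_i), compared against minCostNeedBalance. All names here are hypothetical.

```java
// Illustrative sketch only (not HBase code): the skip decision as the log message describes it.
public final class BalanceSkipCheck {

    // Hypothetical holder for one "functionCost=" entry from the log line.
    record CostFunction(String name, double multiplier, double imbalance) {}

    static boolean needsBalance(double minCostNeedBalance, CostFunction... costs) {
        double weightedImbalance = 0.0;
        double totalWeight = 0.0;
        for (CostFunction c : costs) {
            weightedImbalance += c.multiplier() * c.imbalance();
            totalWeight += c.multiplier();
        }
        // Balancing is skipped when the weighted average does not exceed the threshold.
        return totalWeight > 0 && (weightedImbalance / totalWeight) > minCostNeedBalance;
    }

    public static void main(String[] args) {
        // Values taken from the entries above: every imbalance is 0.0 and the threshold is 1.0,
        // so the balancer logs "skipping load balancing" and no plan is generated.
        boolean balance = needsBalance(1.0,
            new CostFunction("RegionCountSkewCostFunction", 500.0, 0.0),
            new CostFunction("MoveCostFunction", 7.0, 0.0),
            new CostFunction("RackLocalityCostFunction", 15.0, 0.0),
            new CostFunction("TableSkewCostFunction", 35.0, 0.0));
        System.out.println("needsBalance=" + balance); // prints needsBalance=false
    }
}
```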
2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,991 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-10T12:15:52,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,993 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,994 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
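The skip message above also names the knob to change if more aggressive balancing is wanted: lower hbase.master.balancer.stochastic.minCostNeedBalance or raise the relevant cost-function multiplier. A minimal sketch of setting these via the Hadoop Configuration API follows; only the minCostNeedBalance key is confirmed by the log itself, the multiplier keys and the numeric values are assumptions for illustration. In a live cluster the same properties would normally be placed in hbase-site.xml on the master.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Lower the threshold named in the log message so smaller imbalances trigger a plan.
        // 0.05 is an arbitrary illustrative value, not a recommendation.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Assumed multiplier keys for the stochastic balancer cost functions; verify against
        // your HBase version before relying on them.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        conf.setFloat("hbase.master.balancer.stochastic.moveCost", 7f);

        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}
```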
2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,995 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,996 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1008693140=0, srv1733953274=2, srv1785215339=3, srv2019984325=4, srv2083387801=5, srv36409843=7, srv659753029=8, srv734279475=9, srv1495296035=1, srv2131586981=6} racks are {rack=0} 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:52,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:52,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:52,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:52,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-10T12:15:52,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-10T12:15:52,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv142900549=0, srv505111921=6, srv1726496422=1, srv1845818671=2, srv233294209=4, srv910145461=7, srv303022425=5, srv223839586=3} racks are {rack=0} 2024-11-10T12:15:52,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
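[Editorial aside, not part of the log.] The recurring "skipping load balancing" entries in this run report a weighted average imbalance of 0.0, which never exceeds `hbase.master.balancer.stochastic.minCostNeedBalance` (set to 1.0 here), so no balance plan is generated for any table. As a minimal sketch only, assuming the standard Hadoop `Configuration` API and using an illustrative value of 0.05 (not a recommendation, and not what this test uses), lowering that threshold before the balancer is constructed would look roughly like this; the same key can equally be set in hbase-site.xml.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerThresholdSketch {
  public static void main(String[] args) {
    // Start from the usual HBase configuration (hbase-site.xml on the classpath).
    Configuration conf = HBaseConfiguration.create();

    // Property named explicitly in the log message above: when the weighted
    // average imbalance is <= this value, the StochasticLoadBalancer skips
    // balancing. 0.05f is purely illustrative; the test run above uses 1.0.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // 0.0f here is only a fallback for this printout, not a claim about the default.
    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.0f));
  }
}
```

The adjacent "Slop is less than zero, not checking for sloppiness" entries simply mean the separate sloppiness check is bypassed in this run; only the threshold comparison above decides whether a plan is generated.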
2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv142900549=0, srv505111921=6, srv1726496422=1, srv1845818671=2, srv233294209=4, srv910145461=7, srv303022425=5, srv223839586=3} racks are {rack=0} 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv142900549=0, srv505111921=6, srv1726496422=1, srv1845818671=2, srv233294209=4, srv910145461=7, srv303022425=5, srv223839586=3} racks are {rack=0} 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:52,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:52,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv142900549=0, srv505111921=6, srv1726496422=1, srv1845818671=2, srv233294209=4, srv910145461=7, srv303022425=5, srv223839586=3} racks are {rack=0} 2024-11-10T12:15:52,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:52,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:52,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:52,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:52,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:52,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:52,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:52,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:52,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:52,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:52,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:52,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:52,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:52,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:52,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:52,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:52,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:52,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:52,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:52,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:52,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv142900549=0, srv505111921=6, srv1726496422=1, srv1845818671=2, srv233294209=4, srv910145461=7, srv303022425=5, srv223839586=3} racks are {rack=0} 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv142900549=0, srv505111921=6, srv1726496422=1, srv1845818671=2, srv233294209=4, srv910145461=7, srv303022425=5, srv223839586=3} racks are {rack=0} 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:53,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv142900549=0, srv505111921=6, srv1726496422=1, srv1845818671=2, srv233294209=4, srv910145461=7, srv303022425=5, srv223839586=3} racks are {rack=0} 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv142900549=0, srv505111921=6, srv1726496422=1, srv1845818671=2, srv233294209=4, srv910145461=7, srv303022425=5, srv223839586=3} racks are {rack=0} 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv142900549=0, srv505111921=6, srv1726496422=1, srv1845818671=2, srv233294209=4, srv910145461=7, srv303022425=5, srv223839586=3} racks are {rack=0} 2024-11-10T12:15:53,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
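[Editorial aside, not part of the log.] Each `functionCost=` listing in these entries pairs a cost function with a multiplier and an imbalance. As an illustrative sketch only, assuming the reported "weighted average imbalance" is the multiplier-weighted mean of the per-function imbalances with the "(not needed)" functions excluded, the arithmetic behind the 0.0 figure looks like this; the multipliers are the ones printed in the log above.

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceSketch {
  /** Multiplier-weighted mean of per-function imbalances (illustrative only). */
  static double weightedAverageImbalance(Map<String, double[]> functions) {
    double weighted = 0.0;
    double totalMultiplier = 0.0;
    for (double[] f : functions.values()) {
      double multiplier = f[0];
      double imbalance = f[1];
      weighted += multiplier * imbalance;
      totalMultiplier += multiplier;
    }
    return totalMultiplier == 0.0 ? 0.0 : weighted / totalMultiplier;
  }

  public static void main(String[] args) {
    // Multipliers as printed in the log; every imbalance in this run is 0.0,
    // which is why the weighted average is 0.0 and balancing is skipped.
    Map<String, double[]> fns = new LinkedHashMap<>();
    fns.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
    fns.put("MoveCostFunction", new double[] {7.0, 0.0});
    fns.put("RackLocalityCostFunction", new double[] {15.0, 0.0});
    fns.put("TableSkewCostFunction", new double[] {35.0, 0.0});
    fns.put("ReadRequestCostFunction", new double[] {5.0, 0.0});
    fns.put("WriteRequestCostFunction", new double[] {5.0, 0.0});
    fns.put("MemStoreSizeCostFunction", new double[] {5.0, 0.0});
    fns.put("StoreFileCostFunction", new double[] {5.0, 0.0});

    double avg = weightedAverageImbalance(fns);
    System.out.println("weighted average imbalance = " + avg); // 0.0 <= threshold(1.0)
  }
}
```

With every imbalance at 0.0 the result is trivially 0.0, so whether HBase averages in exactly this way does not change the decision recorded in these entries.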
2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv142900549=0, srv505111921=6, srv1726496422=1, srv1845818671=2, srv233294209=4, srv910145461=7, srv303022425=5, srv223839586=3} racks are {rack=0} 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv142900549=0, srv505111921=6, srv1726496422=1, srv1845818671=2, srv233294209=4, srv910145461=7, srv303022425=5, srv223839586=3} racks are {rack=0} 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv142900549=0, srv505111921=6, srv1726496422=1, srv1845818671=2, srv233294209=4, srv910145461=7, srv303022425=5, srv223839586=3} racks are {rack=0} 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv142900549=0, srv505111921=6, srv1726496422=1, srv1845818671=2, srv233294209=4, srv910145461=7, srv303022425=5, srv223839586=3} racks are {rack=0} 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-10T12:15:53,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv142900549=0, srv505111921=6, srv1726496422=1, srv1845818671=2, srv233294209=4, srv910145461=7, srv303022425=5, srv223839586=3} racks are {rack=0} 2024-11-10T12:15:53,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-10T12:15:53,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1113559711=0, srv1689516196=1, srv528719712=5, srv765138053=7, srv2094402496=3, srv644018887=6, srv1706760118=2, srv2098354973=4} racks are {rack=0} 2024-11-10T12:15:53,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:53,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1113559711=0, srv1689516196=1, srv528719712=5, srv765138053=7, srv2094402496=3, srv644018887=6, srv1706760118=2, srv2098354973=4} racks are {rack=0} 2024-11-10T12:15:53,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:53,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1113559711=0, srv1689516196=1, srv528719712=5, srv765138053=7, srv2094402496=3, srv644018887=6, srv1706760118=2, srv2098354973=4} racks are {rack=0} 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1113559711=0, srv1689516196=1, srv528719712=5, srv765138053=7, srv2094402496=3, srv644018887=6, srv1706760118=2, srv2098354973=4} racks are {rack=0} 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1113559711=0, srv1689516196=1, srv528719712=5, srv765138053=7, srv2094402496=3, srv644018887=6, srv1706760118=2, srv2098354973=4} racks are {rack=0} 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1113559711=0, srv1689516196=1, srv528719712=5, srv765138053=7, srv2094402496=3, srv644018887=6, srv1706760118=2, srv2098354973=4} racks are {rack=0} 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:53,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1113559711=0, srv1689516196=1, srv528719712=5, srv765138053=7, srv2094402496=3, srv644018887=6, srv1706760118=2, srv2098354973=4} racks are {rack=0} 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1113559711=0, srv1689516196=1, srv528719712=5, srv765138053=7, srv2094402496=3, srv644018887=6, srv1706760118=2, srv2098354973=4} racks are {rack=0} 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
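The same INFO message names two tuning knobs: lower hbase.master.balancer.stochastic.minCostNeedBalance below the observed imbalance, or raise the multiplier of the cost function that matters to you. A minimal sketch of setting both programmatically, assuming a standard HBase client classpath; in a real deployment these properties would normally be set in hbase-site.xml on the master, and the regionCountCost key is included only as an example of a multiplier property (verify the key against your HBase version):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class BalancerTuningSketch {
    public static void main(String[] args) {
      // Starts from hbase-default.xml / hbase-site.xml found on the classpath.
      Configuration conf = HBaseConfiguration.create();

      // Lower the skip threshold so smaller weighted imbalances still produce a plan.
      conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

      // Alternatively, weight one cost function more heavily (example key for the
      // region-count skew multiplier that appears as 500.0 in the log above).
      conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

      System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
  }

Note that the balancer reads these values on the master, so a client-side Configuration object like the one above only illustrates the property names; the test log keeps the threshold at 1.0, which is why every pass ends in a skip.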
2024-11-10T12:15:53,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:53,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1113559711=0, srv1689516196=1, srv528719712=5, srv765138053=7, srv2094402496=3, srv644018887=6, srv1706760118=2, srv2098354973=4} racks are {rack=0} 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1113559711=0, srv1689516196=1, srv528719712=5, srv765138053=7, srv2094402496=3, srv644018887=6, srv1706760118=2, srv2098354973=4} racks are {rack=0} 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1113559711=0, srv1689516196=1, srv528719712=5, srv765138053=7, srv2094402496=3, srv644018887=6, srv1706760118=2, srv2098354973=4} racks are {rack=0} 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1113559711=0, srv1689516196=1, srv528719712=5, srv765138053=7, srv2094402496=3, srv644018887=6, srv1706760118=2, srv2098354973=4} racks are {rack=0} 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1113559711=0, srv1689516196=1, srv528719712=5, srv765138053=7, srv2094402496=3, srv644018887=6, srv1706760118=2, srv2098354973=4} racks are {rack=0} 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-10T12:15:53,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1113559711=0, srv1689516196=1, srv528719712=5, srv765138053=7, srv2094402496=3, srv644018887=6, srv1706760118=2, srv2098354973=4} racks are {rack=0} 2024-11-10T12:15:53,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-10T12:15:53,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-10T12:15:53,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-10T12:15:53,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-10T12:15:53,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-10T12:15:53,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-10T12:15:53,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-10T12:15:53,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
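[Editor's note] The BalancerClusterState(202)/(303)/(314) lines repeated for each table show the same cluster snapshot: eight servers, each on its own host, all hosts on a single rack ("racks are {rack=0}"), hence "number of hosts=8, number of racks=1". A small hypothetical Java sketch of that host/rack indexing, for orientation only (it is not the HBase BalancerClusterState class):

    // Illustrative indexing of servers to host and rack indices, matching the
    // "server N is on host N" / "server N is on rack 0" lines in this log.
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class ClusterIndexSketch {
        public static void main(String[] args) {
            List<String> servers = List.of(
                "srv1353557903", "srv1755332222", "srv1818532494", "srv2049484836",
                "srv2057868539", "srv390840763", "srv630346962", "srv675300916");

            Map<String, Integer> hostIndex = new LinkedHashMap<>();
            Map<String, Integer> rackIndex = new LinkedHashMap<>();

            for (int server = 0; server < servers.size(); server++) {
                String hostname = servers.get(server);
                // Each server runs on a distinct host in this test, so host index == server index.
                if (!hostIndex.containsKey(hostname)) {
                    hostIndex.put(hostname, hostIndex.size());
                }
                // Every host belongs to the single rack named "rack", so all servers map to rack 0.
                if (!rackIndex.containsKey("rack")) {
                    rackIndex.put("rack", rackIndex.size());
                }
                System.out.printf("server %d is on host %d, rack %d%n",
                    server, hostIndex.get(hostname), rackIndex.get("rack"));
            }
            System.out.printf("Number of tables=1, number of hosts=%d, number of racks=%d%n",
                hostIndex.size(), rackIndex.size());
        }
    }

With a single rack, RackLocalityCostFunction and the region-replica rack cost have nothing to differentiate, consistent with the imbalance=0.0 values printed above.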
2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-10T12:15:53,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:53,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:53,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:53,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:53,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-10T12:15:53,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-10T12:15:53,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-10T12:15:53,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
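Note on the repeated BalancerClusterState records: for every table the balancer rebuilds the same server-to-host and server-to-rack index mapping ({rack=0} means all eight hosts sit in a single rack in this test). A small illustrative Java sketch of that bookkeeping, assuming the cluster state only needs dense integer indices per host and per rack; the class name and structure below are hypothetical, not HBase's BalancerClusterState:

    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    // Illustrative only: assigns dense indices to hosts and racks the way the
    // "server N is on host N" / "server N is on rack 0" records suggest.
    final class ClusterIndexSketch {
        public static void main(String[] args) {
            // Server names and ordering taken from the "Hosts are {...}" record above;
            // in this test each server is its own host and all share one rack.
            List<String> servers = List.of("srv1353557903", "srv1755332222", "srv1818532494",
                "srv2049484836", "srv2057868539", "srv390840763", "srv630346962", "srv675300916");

            Map<String, Integer> hostIndex = new LinkedHashMap<>();
            Map<String, Integer> rackIndex = new LinkedHashMap<>();
            for (int server = 0; server < servers.size(); server++) {
                String host = servers.get(server); // one host per server here
                String rack = "rack";              // single rack, as in the log
                if (!hostIndex.containsKey(host)) {
                    hostIndex.put(host, hostIndex.size());
                }
                if (!rackIndex.containsKey(rack)) {
                    rackIndex.put(rack, rackIndex.size());
                }
                System.out.printf("server %d is on host %d, rack %d%n",
                    server, hostIndex.get(host), rackIndex.get(rack));
            }
            System.out.printf("Number of hosts=%d, number of racks=%d%n",
                hostIndex.size(), rackIndex.size());
        }
    }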
2024-11-10T12:15:53,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-10T12:15:53,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,052 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-10T12:15:53,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,052 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-10T12:15:53,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-10T12:15:53,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-10T12:15:53,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-10T12:15:53,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-10T12:15:53,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-10T12:15:53,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv390840763=5, srv675300916=7, srv1818532494=2, srv1755332222=1, srv1353557903=0, srv2049484836=3, srv2057868539=4, srv630346962=6} racks are {rack=0} 2024-11-10T12:15:53,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-10T12:15:53,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
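From this point the test switches to a different set of server names, and BalancerClusterState again assigns one host index per distinct hostname and a single rack index 0. The following is only a rough sketch of that indexing idea, not the BalancerClusterState code; the shortened server list is hypothetical.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ClusterIndexSketch {
  public static void main(String[] args) {
    // Hypothetical subset of the server names seen in the log records above.
    List<String> servers = List.of("srv1241714515", "srv128492071", "srv1362451007");
    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    Map<String, Integer> rackIndex = new LinkedHashMap<>();
    for (String server : servers) {
      // Every distinct hostname gets the next free host index.
      hostIndex.computeIfAbsent(server, k -> hostIndex.size());
      // This test topology has a single rack literally named "rack", so index 0.
      rackIndex.computeIfAbsent("rack", k -> rackIndex.size());
    }
    for (String server : servers) {
      int host = hostIndex.get(server);
      System.out.println("server " + host + " is on host " + host + " and rack "
          + rackIndex.get("rack"));
    }
  }
}

With eight unique hostnames and a single rack, the per-host and per-rack grouping adds no extra balancing constraint, which matches the "Number of tables=1, number of hosts=8, number of racks=1" summary in each record.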
2024-11-10T12:15:53,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-10T12:15:53,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-10T12:15:53,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-10T12:15:53,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-10T12:15:53,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-10T12:15:53,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-10T12:15:53,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
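The BalancerClusterState lines describe the cluster topology the balancer works from: each srv* name is assigned a host index, each server sits on its own host, and every host sits on the single rack "rack" (index 0). The following is an illustrative reconstruction of that index bookkeeping for this eight-server test cluster, not the org.apache.hadoop.hbase.master.balancer.BalancerClusterState class itself.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch: building the server -> host and server -> rack index
// arrays that the DEBUG/INFO lines above describe for a single-rack,
// eight-host test cluster. Server names and indices are taken from the log.
public class ClusterIndexSketch {
    public static void main(String[] args) {
        String[] servers = {"srv1241714515", "srv128492071", "srv1362451007", "srv2119730388",
                            "srv333171611", "srv368787641", "srv451832803", "srv747115911"};
        Map<String, Integer> hostIndexByServer = new LinkedHashMap<>();
        for (int i = 0; i < servers.length; i++) {
            hostIndexByServer.put(servers[i], i); // each server runs on its own host
        }
        int[] serverToHost = new int[servers.length];
        int[] serverToRack = new int[servers.length];
        for (int server = 0; server < servers.length; server++) {
            serverToHost[server] = hostIndexByServer.get(servers[server]);
            serverToRack[server] = 0; // only one rack ("rack"), so every server maps to rack 0
            System.out.println("server " + server + " is on host " + serverToHost[server]
                + " and rack " + serverToRack[server]);
        }
        System.out.println("Number of tables=1, number of hosts=" + servers.length
            + ", number of racks=1");
    }
}
```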
2024-11-10T12:15:53,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-10T12:15:53,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-10T12:15:53,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-10T12:15:53,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-10T12:15:53,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-10T12:15:53,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-10T12:15:53,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-10T12:15:53,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-10T12:15:53,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-10T12:15:53,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-10T12:15:53,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-10T12:15:53,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-10T12:15:53,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-10T12:15:53,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-10T12:15:53,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:53,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:53,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:53,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:53,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:53,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:53,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-10T12:15:53,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-10T12:15:53,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-10T12:15:53,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-10T12:15:53,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-10T12:15:53,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-10T12:15:53,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-10T12:15:53,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:53,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-10T12:15:53,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:53,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-10T12:15:53,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:53,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-10T12:15:53,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-10T12:15:53,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-10T12:15:53,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-10T12:15:53,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-10T12:15:53,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-10T12:15:53,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-10T12:15:53,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-10T12:15:53,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-10T12:15:53,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-10T12:15:53,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-10T12:15:53,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-10T12:15:53,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-10T12:15:53,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-10T12:15:53,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-10T12:15:53,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-10T12:15:53,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-10T12:15:53,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-10T12:15:53,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-10T12:15:53,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-10T12:15:53,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-10T12:15:53,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-10T12:15:53,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-10T12:15:53,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-10T12:15:53,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-10T12:15:53,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-10T12:15:53,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-10T12:15:53,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-10T12:15:53,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-10T12:15:53,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-10T12:15:53,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-10T12:15:53,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-10T12:15:53,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-10T12:15:53,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-10T12:15:53,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-10T12:15:53,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-10T12:15:53,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-10T12:15:53,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-10T12:15:53,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
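The repeated BalancerClusterState lines enumerate the server-to-host and server-to-rack index maps for this synthetic cluster: eight servers, each the only server on its host, all on one rack. A minimal sketch of that mapping (plain arrays here; the real BalancerClusterState keeps far more state than this):

    // Illustrative only: mirrors the "server N is on host N" / "server N is on rack 0"
    // records above for a cluster of 8 single-server hosts on one rack.
    public class ClusterIndexSketch {
        public static void main(String[] args) {
            int numServers = 8;
            int[] serverToHost = new int[numServers];
            int[] serverToRack = new int[numServers];
            for (int server = 0; server < numServers; server++) {
                serverToHost[server] = server; // each server is alone on its host
                serverToRack[server] = 0;      // everything sits on the single rack
            }
            System.out.println("Number of tables=1, number of hosts=" + numServers
                + ", number of racks=1");
        }
    }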
2024-11-10T12:15:53,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-10T12:15:53,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-10T12:15:53,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-10T12:15:53,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-10T12:15:53,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-10T12:15:53,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-10T12:15:53,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-10T12:15:53,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-10T12:15:53,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
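Each run of BalancerClusterState(202), (303), (314) and (319) records above is the cluster snapshot being indexed before any cost is evaluated: server names are assigned dense integer ids, every id is mapped to a host, and every host to a rack; because this test gives each server its own host and puts everything on the single rack named "rack", the counts reduce to hosts == servers and racks == 1. A minimal sketch of that indexing step follows; the class and variable names are invented for the example and it interleaves the host and rack lines rather than printing them in two passes as the log does.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

/** Illustrative only: dense server -> host -> rack indexing as echoed by the log. */
public class ClusterIndexSketch {
    public static void main(String[] args) {
        // The eight servers listed for table24..table34 (insertion order is arbitrary).
        List<String> servers = List.of("srv1241714515", "srv128492071", "srv1362451007",
            "srv2119730388", "srv333171611", "srv368787641", "srv451832803", "srv747115911");

        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        List<Integer> serverToHost = new ArrayList<>();

        for (int serverId = 0; serverId < servers.size(); serverId++) {
            // Every server is its own host in this test, and there is a single rack (index 0).
            String hostName = servers.get(serverId);
            int hostId = hostIndex.computeIfAbsent(hostName, h -> hostIndex.size());
            serverToHost.add(hostId);
            System.out.printf("server %d is on host %d%n", serverId, hostId);
            System.out.printf("server %d is on rack %d%n", serverId, 0);
        }
        System.out.printf("Number of tables=1, number of hosts=%d, number of racks=%d%n",
            hostIndex.size(), 1);
    }
}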
2024-11-10T12:15:53,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-10T12:15:53,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-10T12:15:53,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-10T12:15:53,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
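The advice embedded in the skip message names the two levers: lower hbase.master.balancer.stochastic.minCostNeedBalance below its value of 1.0 in this run, or raise the multiplier of the cost function you care about so it dominates the weighted average. A hedged sketch of setting them through the Hadoop Configuration API is below; the minCostNeedBalance key is quoted verbatim from the log, whereas the region-count multiplier key is an assumption on my part and should be verified against the HBase release in use, and the numbers are placeholders rather than recommendations.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

/** Illustrative only: tightening the knobs named in the skip message. */
public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Key quoted directly in the log; 1.0 in this test run. Lowering it lets the
        // balancer act on smaller weighted average imbalances.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Assumed property name for the RegionCountSkewCostFunction multiplier
        // (printed as 500.0 above); confirm it for your HBase version before relying on it.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);

        System.out.println("minCostNeedBalance = "
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}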
2024-11-10T12:15:53,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv333171611=4, srv2119730388=3, srv1241714515=0, srv451832803=6, srv747115911=7, srv128492071=1, srv1362451007=2, srv368787641=5} racks are {rack=0} 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-10T12:15:53,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
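The recurring BaseLoadBalancer(253) line, "Slop is less than zero, not checking for sloppiness", means the cheap region-count pre-check is disabled in this test because the configured slop is negative, so the decision falls straight through to the cost-based check discussed above. The sketch below shows the kind of pre-check a non-negative slop would enable; the floor/ceiling formula is my paraphrase of the usual slop test rather than code lifted from HBase, and the names are invented.

/** Illustrative only: the sort of "sloppiness" pre-check a negative slop disables. */
public class SlopCheckSketch {
    // Assumed shape of the check: a server is considered sloppy when its region count
    // falls outside floor(avg * (1 - slop)) .. ceil(avg * (1 + slop)).
    static boolean sloppyServerExists(int[] regionsPerServer, float slop) {
        if (slop < 0) {
            System.out.println("Slop is less than zero, not checking for sloppiness.");
            return false;
        }
        double avg = 0.0;
        for (int regions : regionsPerServer) {
            avg += regions;
        }
        avg /= regionsPerServer.length;
        int floor = (int) Math.floor(avg * (1 - slop));
        int ceiling = (int) Math.ceil(avg * (1 + slop));
        for (int regions : regionsPerServer) {
            if (regions < floor || regions > ceiling) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        int[] uniform = {10, 10, 10, 10, 10, 10, 10, 10}; // eight evenly loaded servers
        System.out.println("sloppy with slop=-1:  " + sloppyServerExists(uniform, -1.0f));
        System.out.println("sloppy with slop=0.2: " + sloppyServerExists(uniform, 0.2f));
    }
}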
2024-11-10T12:15:53,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:53,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1552753095=1, srv339455786=4, srv2102082152=2, srv2116453419=3, srv1504365728=0} racks are {rack=0} 2024-11-10T12:15:53,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-10T12:15:53,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:53,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1552753095=1, srv339455786=4, srv2102082152=2, srv2116453419=3, srv1504365728=0} racks are {rack=0} 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1552753095=1, srv339455786=4, srv2102082152=2, srv2116453419=3, srv1504365728=0} racks are {rack=0} 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1552753095=1, srv339455786=4, srv2102082152=2, srv2116453419=3, srv1504365728=0} racks are {rack=0} 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1552753095=1, srv339455786=4, srv2102082152=2, srv2116453419=3, srv1504365728=0} racks are {rack=0} 2024-11-10T12:15:53,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-10T12:15:53,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-10T12:15:53,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,180 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,181 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,181 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,181 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,183 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,187 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
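[Editorial aside, not part of the test log] The skip decision logged above compares a weighted average imbalance against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here) and points at the per-cost-function multipliers as the other tuning knob; the functionCost breakdown that follows lists those multiplier/imbalance pairs. Below is a minimal, hedged sketch of that comparison, assuming the weighted average is simply the multiplier-weighted mean of the per-function imbalances; the class name BalanceSkipSketch and that exact formula are illustrative assumptions, the real computation lives inside HBase's StochasticLoadBalancer and may differ in detail.

// Illustrative sketch only -- not HBase source code. It mirrors the arithmetic implied by the
// log entry above: each active cost function reports an imbalance in [0,1], weighted by its
// multiplier, and balancing is skipped while the weighted average stays at or below the threshold.
public class BalanceSkipSketch {
    public static void main(String[] args) {
        // (multiplier, imbalance) pairs taken from the functionCost breakdown that follows;
        // functions reported as "(not needed)" are omitted.
        double[][] costs = {
            {500.0, 0.0}, // RegionCountSkewCostFunction
            {7.0, 0.0},   // MoveCostFunction
            {15.0, 0.0},  // RackLocalityCostFunction
            {35.0, 0.0},  // TableSkewCostFunction
            {5.0, 0.0},   // ReadRequestCostFunction
            {5.0, 0.0},   // WriteRequestCostFunction
            {5.0, 0.0},   // MemStoreSizeCostFunction
            {5.0, 0.0}    // StoreFileCostFunction
        };
        // Threshold from the log: hbase.master.balancer.stochastic.minCostNeedBalance
        double minCostNeedBalance = 1.0;

        double weightedSum = 0.0;
        double totalMultiplier = 0.0;
        for (double[] c : costs) {
            weightedSum += c[0] * c[1];
            totalMultiplier += c[0];
        }
        double weightedAverageImbalance =
            totalMultiplier == 0.0 ? 0.0 : weightedSum / totalMultiplier;

        if (weightedAverageImbalance <= minCostNeedBalance) {
            System.out.println("skip balancing: weighted average imbalance="
                + weightedAverageImbalance + " <= threshold(" + minCostNeedBalance + ")");
        } else {
            System.out.println("generate a balance plan");
        }
    }
}

Under this assumed formula, an all-zero imbalance gives 0.0 <= 1.0, which matches the "skipping load balancing" outcome in the log; lowering minCostNeedBalance or raising a multiplier only changes the outcome once at least one imbalance becomes non-zero.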
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-10T12:15:53,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,191 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,192 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,192 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,192 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,194 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
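Note: the long runs of "server N is on rack 0" (and, further down, "server N is on host N") entries are BalancerClusterState recording the per-server host and rack indices it builds before evaluating a plan; in this test every server is its own host and all hosts share a single rack, which is why the summary a little further down reads "Number of tables=1, number of hosts=393, number of racks=1". Purely as an illustrative sketch of that indexing (hypothetical class and variable names, not the actual org.apache.hadoop.hbase.master.balancer code):

import java.util.HashMap;
import java.util.Map;

// Illustrative sketch only: builds per-server host/rack index arrays matching the
// log entries above (each server on its own host, every host on the single rack "rack").
public class ClusterIndexSketch {
    public static void main(String[] args) {
        int numServers = 393;                      // matches "number of hosts=393" in the log
        int[] serverToHost = new int[numServers];
        int[] serverToRack = new int[numServers];
        Map<String, Integer> rackIndex = new HashMap<>();

        for (int server = 0; server < numServers; server++) {
            serverToHost[server] = server;         // one server per host: host index == server index
            String rack = "rack";                  // single rack, as in "racks are {rack=0}"
            serverToRack[server] = rackIndex.computeIfAbsent(rack, r -> rackIndex.size());
            System.out.println("server " + server + " is on host " + serverToHost[server]
                + ", rack " + serverToRack[server]);
        }
        System.out.println("number of hosts=" + numServers + ", number of racks=" + rackIndex.size());
    }
}
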
2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,199 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-10T12:15:53,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:15:53,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-10T12:15:53,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:53,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-10T12:15:53,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,201 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,202 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,202 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,203 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
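Note: the StochasticLoadBalancer message logged above for table14 ("weighted average imbalance=0.0 <= threshold(1.0)") is the gate that decides whether a balance plan is generated at all: each cost function reports an imbalance, the multipliers weight them, and balancing is skipped when the weighted average stays at or below hbase.master.balancer.stochastic.minCostNeedBalance. A minimal sketch of that check, assuming a simple multiplier-weighted average (the exact formula inside StochasticLoadBalancer may differ) and using the multipliers from the functionCost line above:

// Minimal sketch of the "need balance" gate described in the log message above.
// Assumes weighted average imbalance = sum(multiplier * imbalance) / sum(multiplier);
// this is an illustration, not the real StochasticLoadBalancer implementation.
public class NeedBalanceSketch {
    public static void main(String[] args) {
        double minCostNeedBalance = 1.0;           // hbase.master.balancer.stochastic.minCostNeedBalance
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0}; // from functionCost=...
        double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0}; // all 0.0 for table14

        double weightedSum = 0.0, weightTotal = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weightedSum += multipliers[i] * imbalances[i];
            weightTotal += multipliers[i];
        }
        double weightedAverageImbalance = weightTotal == 0 ? 0.0 : weightedSum / weightTotal;

        if (weightedAverageImbalance <= minCostNeedBalance) {
            System.out.println("skipping load balancing because weighted average imbalance="
                + weightedAverageImbalance + " <= threshold(" + minCostNeedBalance + ")");
        } else {
            System.out.println("generating balance plan");
        }
    }
}

With every imbalance at 0.0 the gate cannot trip regardless of the multipliers, which is why the log suggests lowering minCostNeedBalance or raising the multiplier of a specific cost function if more aggressive balancing is wanted.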
2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,204 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,209 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
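[Editor's note, illustrative only] The INFO message above reports that balancing was skipped because the weighted average imbalance (0.0) did not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0), and the functionCost breakdown that follows lists each cost function's multiplier and current imbalance. As a rough, hedged sketch of how such a weighted-average threshold check can be evaluated from those numbers - this is NOT HBase's actual StochasticLoadBalancer code; the class name, the exclusion of the "(not needed)" functions, and the formula sum(multiplier * imbalance) / sum(multiplier) are assumptions made for illustration, with the multiplier/imbalance values copied from the log:

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch only: approximates the kind of weighted-average imbalance
// check suggested by the log line above. Not the real HBase implementation.
public class WeightedImbalanceCheck {
    public static void main(String[] args) {
        // multiplier -> {multiplier, imbalance} pairs taken from the functionCost output below;
        // cost functions reported as "(not needed)" are omitted here (assumption).
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[]{500.0, 0.0});
        costs.put("MoveCostFunction",            new double[]{7.0,   0.0});
        costs.put("RackLocalityCostFunction",    new double[]{15.0,  0.0});
        costs.put("TableSkewCostFunction",       new double[]{35.0,  0.0});
        costs.put("ReadRequestCostFunction",     new double[]{5.0,   0.0});
        costs.put("WriteRequestCostFunction",    new double[]{5.0,   0.0});
        costs.put("MemStoreSizeCostFunction",    new double[]{5.0,   0.0});
        costs.put("StoreFileCostFunction",       new double[]{5.0,   0.0});

        // threshold reported in the log (hbase.master.balancer.stochastic.minCostNeedBalance)
        double minCostNeedBalance = 1.0;

        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] mi : costs.values()) {
            weightedSum += mi[0] * mi[1];   // multiplier * imbalance
            multiplierSum += mi[0];
        }
        double weightedAvgImbalance = (multiplierSum == 0.0) ? 0.0 : weightedSum / multiplierSum;

        if (weightedAvgImbalance <= minCostNeedBalance) {
            // matches the decision in the log: all imbalances are 0.0, so balancing is skipped
            System.out.printf("skipping load balancing: weighted average imbalance=%.1f <= threshold(%.1f)%n",
                    weightedAvgImbalance, minCostNeedBalance);
        } else {
            System.out.println("would generate a balance plan");
        }
    }
}

Under these assumptions, lowering minCostNeedBalance or raising a multiplier makes the check easier to trip, which is exactly the tuning advice the log message gives. The functionCost detail from the same log statement continues below.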
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-10T12:15:53,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,211 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,212 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,213 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,213 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,215 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
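The INFO line above records the StochasticLoadBalancer's gating check for table16: a weighted average imbalance is compared against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run), and plan generation is skipped when the average does not exceed that threshold. Below is a minimal, self-contained sketch of such a check, assuming a multiplier-weighted average over the cost functions listed in the functionCost= line that follows; it is an illustration only, not HBase's actual implementation, and the class, record, and method names are invented for the example.

// Illustrative sketch (not the real StochasticLoadBalancer code): compare a
// multiplier-weighted average imbalance against the minCostNeedBalance threshold.
public class MinCostNeedBalanceSketch {

    // One cost function's configured multiplier and current imbalance,
    // mirroring the functionCost= listing in the log.
    record CostEntry(String name, double multiplier, double imbalance) {}

    static boolean needsBalance(CostEntry[] costs, double minCostNeedBalance) {
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (CostEntry c : costs) {
            weightedSum += c.multiplier() * c.imbalance();
            multiplierSum += c.multiplier();
        }
        double weightedAverageImbalance =
            multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
        // Skip balancing when the weighted average does not exceed the threshold.
        return weightedAverageImbalance > minCostNeedBalance;
    }

    public static void main(String[] args) {
        // Multipliers and imbalances taken from the functionCost= line for table16;
        // every imbalance is 0.0, so no balance plan is generated.
        CostEntry[] costs = {
            new CostEntry("RegionCountSkewCostFunction", 500.0, 0.0),
            new CostEntry("MoveCostFunction", 7.0, 0.0),
            new CostEntry("RackLocalityCostFunction", 15.0, 0.0),
            new CostEntry("TableSkewCostFunction", 35.0, 0.0),
            new CostEntry("ReadRequestCostFunction", 5.0, 0.0),
            new CostEntry("WriteRequestCostFunction", 5.0, 0.0),
            new CostEntry("MemStoreSizeCostFunction", 5.0, 0.0),
            new CostEntry("StoreFileCostFunction", 5.0, 0.0)
        };
        double minCostNeedBalance = 1.0; // threshold(1.0) reported in the log message
        System.out.println("needsBalance=" + needsBalance(costs, minCostNeedBalance));
        // Prints needsBalance=false, matching the "skipping load balancing" decision.
    }
}

With every imbalance at 0.0 the weighted average is 0.0, which is why table16 is skipped here; as the log message itself suggests, lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising the multiplier of a cost function with a non-zero imbalance is what would make this check pass.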
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-10T12:15:53,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,222 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,223 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,223 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,224 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,225 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,231 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:15:53,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-10T12:15:53,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
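The "skipping load balancing" record above, together with the functionCost breakdown that continues on the next log line, describes a simple threshold check: a weighted average of the per-cost-function imbalances is compared against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this test run). The sketch below is an illustrative reconstruction of that check, not HBase's actual StochasticLoadBalancer code; the class name, the helper, and the exact weighting (a multiplier-weighted mean) are assumptions for illustration, while the multipliers and imbalances are copied from the functionCost record.

// Illustrative sketch of the minCostNeedBalance check summarized in the log record
// above. Not HBase's actual implementation; the weighting shown (a multiplier-
// weighted mean of imbalances) is an assumption made for illustration.
public class MinCostNeedBalanceSketch {

  /** One cost-function term: a relative multiplier and an imbalance in [0, 1]. */
  record CostTerm(String name, double multiplier, double imbalance) {}

  /** Multiplier-weighted average of the imbalances. */
  static double weightedAverageImbalance(CostTerm[] terms) {
    double weighted = 0.0;
    double totalMultiplier = 0.0;
    for (CostTerm t : terms) {
      weighted += t.multiplier() * t.imbalance();
      totalMultiplier += t.multiplier();
    }
    return totalMultiplier == 0.0 ? 0.0 : weighted / totalMultiplier;
  }

  public static void main(String[] args) {
    // Threshold used in this test run; configured via
    // hbase.master.balancer.stochastic.minCostNeedBalance.
    double minCostNeedBalance = 1.0;

    // Multipliers and imbalances taken from the functionCost record that follows.
    CostTerm[] terms = {
      new CostTerm("RegionCountSkewCostFunction", 500.0, 0.0),
      new CostTerm("MoveCostFunction", 7.0, 0.0),
      new CostTerm("RackLocalityCostFunction", 15.0, 0.0),
      new CostTerm("TableSkewCostFunction", 35.0, 0.0),
      new CostTerm("ReadRequestCostFunction", 5.0, 0.0),
      new CostTerm("WriteRequestCostFunction", 5.0, 0.0),
      new CostTerm("MemStoreSizeCostFunction", 5.0, 0.0),
      new CostTerm("StoreFileCostFunction", 5.0, 0.0)
    };

    double imbalance = weightedAverageImbalance(terms);
    if (imbalance <= minCostNeedBalance) {
      System.out.printf(
          "skipping load balancing because weighted average imbalance=%.1f <= threshold(%.1f)%n",
          imbalance, minCostNeedBalance);
    }
  }
}

As the log message itself suggests, lowering the threshold or raising a specific cost function's multiplier makes this check more likely to fail and therefore more likely to produce an actual balance plan.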
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,233 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,233 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,234 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,234 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,236 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
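The long runs of "server N is on host N" and "server N is on rack 0" records are the cluster-state indexing step: each server name from the Hosts map is assigned an integer host index and rack index, and because this test uses one host per server and a single rack (racks are {rack=0}), the host index equals the server index while every rack index is 0. The sketch below is a simplified, illustrative reconstruction of that bookkeeping, not BalancerClusterState's actual code; the generated server names are placeholders.

import java.util.HashMap;
import java.util.Map;

// Simplified sketch of the server -> host/rack index bookkeeping enumerated in the
// log records above. Not HBase's BalancerClusterState; names and structure are
// illustrative only.
public class ClusterIndexSketch {

  public static void main(String[] args) {
    int serverCount = 393;                    // matches "number of hosts=393" in this test
    String[] serverNames = new String[serverCount];
    for (int i = 0; i < serverCount; i++) {
      serverNames[i] = "srv" + i;             // placeholder names; the real log uses e.g. srv1734689245
    }

    Map<String, Integer> hostToIndex = new HashMap<>();
    Map<String, Integer> rackToIndex = new HashMap<>();
    int[] serverToHost = new int[serverCount];
    int[] serverToRack = new int[serverCount];

    for (int server = 0; server < serverCount; server++) {
      // One host per server in this test, so the host index ends up equal to the server index.
      String host = serverNames[server];
      Integer hostIdx = hostToIndex.get(host);
      if (hostIdx == null) {
        hostIdx = hostToIndex.size();
        hostToIndex.put(host, hostIdx);
      }
      serverToHost[server] = hostIdx;

      // A single rack named "rack" in this test ("racks are {rack=0}"), so every rack index is 0.
      String rack = "rack";
      Integer rackIdx = rackToIndex.get(rack);
      if (rackIdx == null) {
        rackIdx = rackToIndex.size();
        rackToIndex.put(rack, rackIdx);
      }
      serverToRack[server] = rackIdx;

      System.out.println("server " + server + " is on host " + serverToHost[server]);
      System.out.println("server " + server + " is on rack " + serverToRack[server]);
    }

    System.out.println("Number of hosts=" + hostToIndex.size()
        + ", number of racks=" + rackToIndex.size());
  }
}

In this single-rack, one-host-per-server topology the mapping is trivial, which is why the summary record earlier reports number of hosts=393 and number of racks=1.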
2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
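The per-server entries summarized above describe the topology the balancer builds before scoring a table: every one of the 393 servers sits on its own host and all of them share a single rack, which is exactly what the "number of hosts=393, number of racks=1" summary reports. The sketch below rebuilds that indexing under those assumptions; the class and variable names are hypothetical and this is not HBase's BalancerClusterState code.

```java
// Minimal sketch (hypothetical names, not HBase source): rebuilds the host/rack index
// implied by the per-server log entries, where every server is its own host and all
// servers share rack 0.
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ClusterTopologySketch {
    public static void main(String[] args) {
        int numServers = 393;                              // server indices 0..392, as logged above
        List<String> hosts = new ArrayList<>();
        List<String> racks = new ArrayList<>();
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        Map<String, Integer> rackIndex = new LinkedHashMap<>();
        int[] serverToHost = new int[numServers];
        int[] serverToRack = new int[numServers];

        for (int server = 0; server < numServers; server++) {
            String host = "host" + server;                 // "server N is on host N"
            String rack = "rack0";                         // "server N is on rack 0"
            serverToHost[server] = hostIndex.computeIfAbsent(host, h -> { hosts.add(h); return hosts.size() - 1; });
            serverToRack[server] = rackIndex.computeIfAbsent(rack, r -> { racks.add(r); return racks.size() - 1; });
        }

        // Matches one of the DEBUG entries and the summary line above
        System.out.printf("server 302 is on host %d, rack %d%n", serverToHost[302], serverToRack[302]);
        System.out.printf("Number of tables=1, number of hosts=%d, number of racks=%d%n", hosts.size(), racks.size());
    }
}
```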
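The INFO message above records the skip decision for table18, and the functionCost line that follows lists each cost function's multiplier and current imbalance. The log does not print the aggregation formula itself, so the sketch below assumes, purely as an illustration, that the "weighted average imbalance" is the multiplier-weighted mean of the per-function imbalances, compared against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here); the class name and bookkeeping are hypothetical, not HBase internals.

```java
// Illustrative sketch only, not HBase source. It reuses the multipliers and imbalances
// printed in the functionCost breakdown for table18; cost functions reported as
// "(not needed)" are omitted. Treating the aggregate as a multiplier-weighted mean is
// an assumption, since the log shows only the resulting "weighted average imbalance".
import java.util.LinkedHashMap;
import java.util.Map;

public class BalanceDecisionSketch {
    public static void main(String[] args) {
        // cost function name -> { multiplier, imbalance }
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        costs.put("MoveCostFunction",            new double[] {  7.0, 0.0});
        costs.put("RackLocalityCostFunction",    new double[] { 15.0, 0.0});
        costs.put("TableSkewCostFunction",       new double[] { 35.0, 0.0});
        costs.put("ReadRequestCostFunction",     new double[] {  5.0, 0.0});
        costs.put("WriteRequestCostFunction",    new double[] {  5.0, 0.0});
        costs.put("MemStoreSizeCostFunction",    new double[] {  5.0, 0.0});
        costs.put("StoreFileCostFunction",       new double[] {  5.0, 0.0});

        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] c : costs.values()) {
            weightedSum += c[0] * c[1];   // multiplier * imbalance
            multiplierSum += c[0];
        }
        double weightedAverageImbalance = multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;

        // Threshold named in the log message: hbase.master.balancer.stochastic.minCostNeedBalance
        double minCostNeedBalance = 1.0;

        if (weightedAverageImbalance <= minCostNeedBalance) {
            System.out.printf("skipping load balancing because weighted average imbalance=%.1f <= threshold(%.1f)%n",
                    weightedAverageImbalance, minCostNeedBalance);
        } else {
            System.out.println("imbalance exceeds threshold, generating a balance plan");
        }
    }
}
```

With every imbalance at 0.0 the weighted average comes out to 0.0, which is at or below the 1.0 threshold, so no balance plan is generated for table18; lowering minCostNeedBalance or raising a cost function's multiplier would make the comparison trip sooner, as the message itself suggests.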
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-10T12:15:53,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,243 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,244 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,244 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,245 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,247 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,252 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-10T12:15:53,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:15:53,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-10T12:15:53,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:53,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
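[Editor's note] The StochasticLoadBalancer message just above is the actionable part of this dump: the balance plan is skipped whenever the weighted average imbalance (presumably the multiplier-weighted mean of the per-cost-function imbalances listed in the functionCost breakdown that follows, all 0.0 in this run) stays at or below hbase.master.balancer.stochastic.minCostNeedBalance. Below is a minimal sketch, not part of the test output, of how one might act on that hint. The minCostNeedBalance key is quoted verbatim from the log line; the multiplier key for RegionCountSkewCostFunction is assumed to be the usual hbase.master.balancer.stochastic.regionCountCost and should be verified against the HBase version in use.

// Editorial sketch: tuning the two knobs named in the log message above.
// In practice these settings belong in hbase-site.xml on the master;
// programmatic Configuration is used here only to illustrate the keys.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Key quoted verbatim in the log: lower it so the balancer acts on smaller
    // imbalances instead of skipping the plan at the default threshold of 1.0.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Assumed key name for the RegionCountSkewCostFunction multiplier (logged
    // default 500.0); raising it makes region-count skew dominate the weighted average.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}

Either change only shifts the comparison reported in the skip message; with every per-function imbalance at 0.0, as in this run, the weighted average remains 0.0 and the plan would be skipped regardless.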
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-10T12:15:53,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,254 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,254 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,255 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,255 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,257 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,259 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-10T12:15:53,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
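The functionCost breakdown above is what feeds the "weighted average imbalance" check in the skip decision logged for table50. Below is a minimal, self-contained Java sketch of that check. It assumes the weighted average is sum(multiplier x imbalance) / sum(multiplier) over the cost functions reported as needed, which reproduces the numbers in this run (all imbalances 0.0 against threshold 1.0) but is an approximation of StochasticLoadBalancer's internal logic, not a copy of it. The multiplier and imbalance values are taken from the functionCost line above; the class name WeightedImbalanceCheck is hypothetical.

// Sketch only: recomputes the skip decision from the functionCost values above.
// The weighted-average formula is an assumption, not code lifted from
// StochasticLoadBalancer.
public class WeightedImbalanceCheck {
  public static void main(String[] args) {
    // {multiplier, imbalance} pairs for the cost functions reported as needed
    double[][] needed = {
        {500.0, 0.0}, // RegionCountSkewCostFunction
        {7.0,   0.0}, // MoveCostFunction
        {15.0,  0.0}, // RackLocalityCostFunction
        {35.0,  0.0}, // TableSkewCostFunction
        {5.0,   0.0}, // ReadRequestCostFunction
        {5.0,   0.0}, // WriteRequestCostFunction
        {5.0,   0.0}, // MemStoreSizeCostFunction
        {5.0,   0.0}, // StoreFileCostFunction
    };
    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (double[] f : needed) {
      weightedSum += f[0] * f[1];
      multiplierSum += f[0];
    }
    double weightedAverageImbalance =
        multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
    // hbase.master.balancer.stochastic.minCostNeedBalance, 1.0 in this run
    double minCostNeedBalance = 1.0;
    System.out.printf("weighted average imbalance=%.1f, threshold=%.1f, needsBalance=%s%n",
        weightedAverageImbalance, minCostNeedBalance,
        weightedAverageImbalance > minCostNeedBalance);
  }
}

As the log message itself suggests, lowering hbase.master.balancer.stochastic.minCostNeedBalance below the computed weighted average, or raising the multiplier of a specific cost function, is what would make this check pass and a balance plan be generated.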
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,264 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,265 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,265 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,266 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,267 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
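[Editor's note, hedged illustration — not part of the captured log.] The message above suggests two ways to make the StochasticLoadBalancer act more aggressively: lower hbase.master.balancer.stochastic.minCostNeedBalance below its 1.0 default, or raise the multiplier of a specific cost function. Below is a minimal Java sketch showing how those properties could be set programmatically through the standard HBase client Configuration API; the numeric values (0.05, 1000) are illustrative only, and the regionCountCost key is assumed to be the multiplier behind RegionCountSkewCostFunction (its default matches the 500.0 shown in the functionCost line that follows). In practice these settings normally live in hbase-site.xml on the HMaster rather than in client code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            // Start from the usual HBase configuration (hbase-site.xml on the classpath).
            Configuration conf = HBaseConfiguration.create();

            // Default is 1.0; a lower threshold lets the balancer act on smaller weighted imbalances.
            // 0.05 is an illustrative value, not taken from this log.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

            // Alternatively, raise the weight of a specific cost function. This key is assumed to be
            // the region-count skew multiplier (default 500); raising it biases plans toward evening
            // out per-server region counts.
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

            // Print the effective values so the change can be verified.
            System.out.println("minCostNeedBalance = "
                + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
            System.out.println("regionCountCost = "
                + conf.get("hbase.master.balancer.stochastic.regionCountCost"));
        }
    }

Changes made this way only affect the process that builds the Configuration; for the running cluster the same keys would be edited in the master's hbase-site.xml and picked up on restart (or via update_config where supported). The captured log resumes below with the functionCost breakdown for table51.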
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-10T12:15:53,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,275 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,275 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,276 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,276 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,278 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,284 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-10T12:15:53,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,285 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,286 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,286 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,287 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,289 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-10T12:15:53,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,296 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,297 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,297 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,297 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,299 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,305 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-10T12:15:53,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,306 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23
2024-11-10T12:15:53,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24
[... identical DEBUG entries for servers 25 through 391, each reporting "server N is on host N", timestamps 12:15:53,306 through 12:15:53,311 ...]
2024-11-10T12:15:53,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392
2024-11-10T12:15:53,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-11-10T12:15:53,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0
[... identical INFO entries for servers 2 through 285, each reporting "server N is on rack 0", timestamps 12:15:53,311 through 12:15:53,314 ...]
2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0
2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-10T12:15:53,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,317 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,318 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,318 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,318 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,320 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,326 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-10T12:15:53,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,327 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,328 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,329 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,329 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,331 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,336 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-10T12:15:53,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:15:53,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-10T12:15:53,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:53,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-10T12:15:53,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,338 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,339 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,339 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,340 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,341 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,347 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-10T12:15:53,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:15:53,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-10T12:15:53,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:53,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
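[Editor's note: the message above names the knob that controls when the StochasticLoadBalancer decides a table needs balancing; the per-function multipliers it refers to are listed in the functionCost entry that follows. Below is a minimal, untested sketch of how that threshold and one cost-function weight could be tuned programmatically. It assumes the standard HBase/Hadoop client libraries are on the classpath; the class name BalancerTuningSketch, the values 0.05 and 1000, and the property key hbase.master.balancer.stochastic.regionCountCost are illustrative assumptions, not taken from this test run. Only hbase.master.balancer.stochastic.minCostNeedBalance appears verbatim in the log.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        // Start from the cluster configuration (reads hbase-site.xml from the classpath).
        Configuration conf = HBaseConfiguration.create();

        // Lower the threshold so the balancer acts on smaller weighted imbalances than the
        // 1.0 reported in the log above. 0.05 is an illustrative value, not a recommendation.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Or weight a specific cost function more heavily, e.g. region count skew, which the
        // log shows at multiplier=500.0. The property key here is an assumed name for that knob.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        // Print the effective value back, just to confirm what was set.
        System.out.println("minCostNeedBalance = "
            + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}

[In a real deployment these properties would normally be set in hbase-site.xml on the master and picked up on restart or via update_config, rather than in ad hoc client code.]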
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-10T12:15:53,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,349 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,350 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,350 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,351 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,352 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
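The INFO record above names the knob that controls when the stochastic balancer bothers to produce a plan: hbase.master.balancer.stochastic.minCostNeedBalance, default 1.0. As a minimal sketch, assuming the standard hbase-common/hbase-client Configuration API and an arbitrary illustrative threshold of 0.025 (not a recommendation from this log), the property could be lowered programmatically, e.g. in a test setup, so the balancer acts on smaller weighted-average imbalances instead of skipping the table:

```java
// Hypothetical sketch only: the property name is taken from the log message
// above; the 0.025 value is an arbitrary example threshold.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Default is 1.0; a table is only rebalanced when its weighted average
    // imbalance exceeds this threshold, so lowering it makes balancing
    // more aggressive (the alternative the log mentions is raising the
    // relative multipliers of individual cost functions).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f);
    System.out.println(
        conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}
```

In a real deployment the same property would normally be set in hbase-site.xml rather than in code; the programmatic form above is shown only because this log comes from a time-limited balancer test where configuration is built in-process.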
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-10T12:15:53,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,360 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,361 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,361 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,362 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,364 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,369 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-10T12:15:53,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-10T12:15:53,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-10T12:15:53,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:15:53,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-10T12:15:53,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:53,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
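[Editor's note] The functionCost breakdown that follows lists, for each cost function, its configured multiplier and its current imbalance. As a rough illustration of the decision logged above, the stand-alone sketch below recomputes a weighted average imbalance as sum(multiplier x imbalance) / sum(multiplier) over the functions that report a value and compares it against the minCostNeedBalance threshold. This is a simplified approximation for reading the log, not HBase's StochasticLoadBalancer code; the WeightedCost record and all values are taken from or modeled on the log entries below.

import java.util.List;

// Stand-alone sketch (not HBase code): approximate the "weighted average imbalance"
// check that the balancer logs, using the multipliers/imbalances from the log above.
public class BalanceCheckSketch {

    // Hypothetical holder for one cost function's multiplier and current imbalance.
    record WeightedCost(String name, double multiplier, double imbalance) {}

    static double weightedAverageImbalance(List<WeightedCost> costs) {
        double weightedSum = 0.0;
        double totalWeight = 0.0;
        for (WeightedCost c : costs) {
            weightedSum += c.multiplier() * c.imbalance();
            totalWeight += c.multiplier();
        }
        return totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
    }

    public static void main(String[] args) {
        // Values taken from the functionCost line below; cost functions reported
        // as "(not needed)" are skipped in this sketch.
        List<WeightedCost> costs = List.of(
                new WeightedCost("RegionCountSkewCostFunction", 500.0, 0.0),
                new WeightedCost("MoveCostFunction", 7.0, 0.0),
                new WeightedCost("RackLocalityCostFunction", 15.0, 0.0),
                new WeightedCost("TableSkewCostFunction", 35.0, 0.0),
                new WeightedCost("ReadRequestCostFunction", 5.0, 0.0),
                new WeightedCost("WriteRequestCostFunction", 5.0, 0.0),
                new WeightedCost("MemStoreSizeCostFunction", 5.0, 0.0),
                new WeightedCost("StoreFileCostFunction", 5.0, 0.0));

        // threshold(1.0) in the log corresponds to hbase.master.balancer.stochastic.minCostNeedBalance.
        double threshold = 1.0;
        double imbalance = weightedAverageImbalance(costs);
        if (imbalance <= threshold) {
            System.out.printf("imbalance=%.3f <= threshold(%.1f): skip balancing%n", imbalance, threshold);
        } else {
            System.out.printf("imbalance=%.3f > threshold(%.1f): generate a balance plan%n", imbalance, threshold);
        }
    }
}

To act on the log's own suggestion, the threshold can be lowered by setting hbase.master.balancer.stochastic.minCostNeedBalance (typically in hbase-site.xml) to a value below the observed imbalance, or the multiplier of the cost function of interest can be raised so that its contribution dominates the weighted average.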
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-10T12:15:53,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,371 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,372 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,373 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,373 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,375 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
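[Editor's note] The skip message above names the two knobs it suggests: lowering hbase.master.balancer.stochastic.minCostNeedBalance (shown at 1.0 here, so a weighted imbalance of 0.0 never triggers a plan) or raising the multiplier of a specific cost function (e.g. RegionCountSkewCostFunction at 500.0 in the functionCost line). A minimal sketch of how one might set those in code follows; the minCostNeedBalance key is quoted directly from the log, while the regionCountCost key is the usual StochasticLoadBalancer property name in recent HBase releases and should be verified against the version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative sketch only, not the test's own setup.
    public class BalancerTuningSketch {
      public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Lower the "needs balance" threshold; with the 1.0 seen in the log,
        // an imbalance of 0.0 is always below it and balancing is skipped.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Or weight a specific cost function more heavily, e.g. region count skew
        // (reported as multiplier=500.0 in the functionCost line above).
        // Assumed property key -- check it against your HBase release.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        return conf;
      }
    }

The same values can equally be placed in hbase-site.xml; the code form is shown only because the surrounding output comes from a Java test harness.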
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,382 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,383 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,384 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,384 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,386 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,392 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:15:53,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-10T12:15:53,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-10T12:15:53,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,394 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,394 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,395 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,395 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,397 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,401 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-10T12:15:53,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,404 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,405 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,405 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,405 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,407 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
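[Editor's note] From this point on, the INFO records (BalancerClusterState, source line 314) place every server on rack 0, i.e. the test cluster is modeled as a single rack. A minimal sketch of how such records can be folded into a rack-to-servers grouping is shown below; it assumes the single-rack topology printed here and uses invented names rather than the real BalancerClusterState internals:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

// Hypothetical sketch: group servers by rack for the single-rack topology
// described by the "server i is on rack 0" INFO records above.
// Names are illustrative only, not HBase's BalancerClusterState API.
public class RackGroupingSketch {
    public static void main(String[] args) {
        int numServers = 393;                                 // servers 0..392 in this log
        int[] serverIndexToRackIndex = new int[numServers];   // all zeros => everything on rack 0

        Map<Integer, List<Integer>> serversPerRack = new TreeMap<>();
        for (int server = 0; server < numServers; server++) {
            serversPerRack
                .computeIfAbsent(serverIndexToRackIndex[server], r -> new ArrayList<>())
                .add(server);
        }

        // With a single rack there is no cross-rack placement to reason about,
        // so rack-awareness effectively drops out of the balancing decision.
        serversPerRack.forEach((rack, servers) ->
            System.out.println("rack " + rack + " holds " + servers.size() + " servers"));
    }
}
```

If this sketch is run with the single-rack assumption above, it reports one rack holding all 393 servers, matching the uniform "is on rack 0" lines in the surrounding log.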
2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,414 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,415 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,416 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,416 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
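The table41 entry earlier in this run reports that balancing was skipped because the weighted average imbalance (0.0) did not exceed the threshold hbase.master.balancer.stochastic.minCostNeedBalance (1.0), and the functionCost line lists each cost function's multiplier and current imbalance. The sketch below is a minimal, illustrative reading of that skip decision, assuming the weighted average is simply sum(multiplier x imbalance) / sum(multiplier); the class and record names are hypothetical and this is not the actual HBase balancer implementation.

```java
// Illustrative sketch only: mirrors the decision reported in the log above,
// not the org.apache.hadoop.hbase.master.balancer code itself.
import java.util.List;

public class BalanceDecisionSketch {

    // Hypothetical holder for one cost function's weight and current imbalance.
    record CostFunction(String name, double multiplier, double imbalance) {}

    // Weighted average imbalance = sum(multiplier * imbalance) / sum(multiplier).
    static double weightedAverageImbalance(List<CostFunction> costs) {
        double weighted = 0.0, total = 0.0;
        for (CostFunction c : costs) {
            weighted += c.multiplier() * c.imbalance();
            total += c.multiplier();
        }
        return total == 0.0 ? 0.0 : weighted / total;
    }

    public static void main(String[] args) {
        // Multipliers and imbalances copied from the table41 functionCost line in the log.
        List<CostFunction> costs = List.of(
            new CostFunction("RegionCountSkewCostFunction", 500.0, 0.0),
            new CostFunction("MoveCostFunction", 7.0, 0.0),
            new CostFunction("RackLocalityCostFunction", 15.0, 0.0),
            new CostFunction("TableSkewCostFunction", 35.0, 0.0),
            new CostFunction("ReadRequestCostFunction", 5.0, 0.0),
            new CostFunction("WriteRequestCostFunction", 5.0, 0.0),
            new CostFunction("MemStoreSizeCostFunction", 5.0, 0.0),
            new CostFunction("StoreFileCostFunction", 5.0, 0.0));

        // Threshold reported by the log: hbase.master.balancer.stochastic.minCostNeedBalance = 1.0.
        double minCostNeedBalance = 1.0;

        double imbalance = weightedAverageImbalance(costs);
        if (imbalance <= minCostNeedBalance) {
            System.out.printf("skipping load balancing: imbalance=%.1f <= threshold(%.1f)%n",
                imbalance, minCostNeedBalance);
        } else {
            System.out.println("would generate a balance plan");
        }
    }
}
```

With every imbalance at 0.0, this sketch reaches the same skip decision the log records; raising a cost function's multiplier or lowering the threshold is exactly what the log message suggests for more aggressive balancing.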
2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,418 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
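The skip decision above turns on two things the message itself names: the weighted average imbalance derived from the per-cost-function multipliers and imbalances in the functionCost breakdown that follows, and the hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0 here). The Java sketch below illustrates that comparison under the assumption that "weighted average imbalance" means a multiplier-weighted mean of the per-function imbalances; the formula and the class are illustrative, not the actual StochasticLoadBalancer code, and the only identifiers taken from the log are the property name and the multiplier/imbalance values.

import org.apache.hadoop.conf.Configuration;

public class MinCostNeedBalanceSketch {

    // Assumed combination: multiplier-weighted mean of per-cost-function imbalances.
    static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
        double weighted = 0.0;
        double totalMultiplier = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weighted += multipliers[i] * imbalances[i];
            totalMultiplier += multipliers[i];
        }
        return totalMultiplier == 0.0 ? 0.0 : weighted / totalMultiplier;
    }

    public static void main(String[] args) {
        // Multipliers and imbalances copied from the functionCost line below
        // (cost functions reported as "not needed" are omitted).
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};

        // Property named in the log message; it defaults to 1.0 when unset.
        Configuration conf = new Configuration();
        double threshold =
            conf.getDouble("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0);

        double imbalance = weightedAverageImbalance(multipliers, imbalances);
        if (imbalance <= threshold) {
            System.out.println("skip balancing: " + imbalance + " <= " + threshold);
        } else {
            System.out.println("generate balance plan: " + imbalance + " > " + threshold);
        }
    }
}

Lowering the property (in hbase-site.xml, or with conf.setDouble("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05) before the balancer is constructed) makes the skip condition above hold less often, which is what the message means by more aggressive balancing; raising an individual cost function's multiplier shifts the weighted average instead.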
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-10T12:15:53,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,425 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,426 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,426 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,426 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,428 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
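The balancer log message above names two tuning knobs for making the StochasticLoadBalancer act on smaller imbalances: lowering hbase.master.balancer.stochastic.minCostNeedBalance below its 1.0 default, or raising the multiplier of a specific cost function (the functionCost breakdown that follows lists the current multipliers). A minimal, hypothetical Java sketch of setting these is included here for illustration only; it is not part of this test run, and the regionCountCost key name is an assumption tied to the RegionCountSkewCostFunction shown below.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        // Start from the usual hbase-site.xml-backed configuration.
        Configuration conf = HBaseConfiguration.create();

        // Per the log above, balancing is skipped while weighted imbalance <= 1.0.
        // Lowering the threshold makes the balancer act on smaller imbalances.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Alternatively, weight one cost function more heavily, e.g. region count skew
        // (multiplier=500.0 in the functionCost line below). Key name is assumed.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println("minCostNeedBalance = "
            + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
      }
    }

In a real deployment these properties would normally go into hbase-site.xml on the master rather than being set programmatically; the sketch only illustrates the key names and the default values mentioned in the log.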
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-10T12:15:53,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,435 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,436 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,436 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,437 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,439 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
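The skip decision logged just above compares a weighted average of per-cost-function imbalances against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 per the message); the functionCost breakdown that follows lists each cost function's multiplier and measured imbalance. The sketch below is a minimal, hypothetical Java illustration of one plausible reading of that comparison (sum of multiplier * imbalance divided by the sum of multipliers), using the multipliers and imbalances reported in this log; it is not the actual StochasticLoadBalancer implementation, and the class name and computation are assumptions for illustration only.

// Hypothetical sketch, not HBase source: compare a weighted average imbalance
// against hbase.master.balancer.stochastic.minCostNeedBalance to decide whether
// balancing is skipped, as the log message above describes.
import java.util.LinkedHashMap;
import java.util.Map;

public class NeedsBalanceSketch {
    public static void main(String[] args) {
        // Multipliers and imbalances copied from the functionCost line below;
        // cost functions reported as "(not needed)" are simply omitted here.
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[]{500.0, 0.0});
        costs.put("MoveCostFunction",            new double[]{7.0,   0.0});
        costs.put("RackLocalityCostFunction",    new double[]{15.0,  0.0});
        costs.put("TableSkewCostFunction",       new double[]{35.0,  0.0});
        costs.put("ReadRequestCostFunction",     new double[]{5.0,   0.0});
        costs.put("WriteRequestCostFunction",    new double[]{5.0,   0.0});
        costs.put("MemStoreSizeCostFunction",    new double[]{5.0,   0.0});
        costs.put("StoreFileCostFunction",       new double[]{5.0,   0.0});

        double minCostNeedBalance = 1.0; // threshold named in the log message

        double weightedImbalance = 0.0;  // sum of multiplier * imbalance
        double totalMultiplier = 0.0;    // sum of multipliers
        for (double[] multiplierAndImbalance : costs.values()) {
            weightedImbalance += multiplierAndImbalance[0] * multiplierAndImbalance[1];
            totalMultiplier  += multiplierAndImbalance[0];
        }
        double weightedAverage = totalMultiplier == 0.0 ? 0.0 : weightedImbalance / totalMultiplier;

        if (weightedAverage <= minCostNeedBalance) {
            // Matches the outcome in the log: every imbalance is 0.0, so the
            // weighted average is 0.0 and balancing is skipped for the table.
            System.out.printf("skipping load balancing: weighted average imbalance=%.1f <= threshold(%.1f)%n",
                    weightedAverage, minCostNeedBalance);
        } else {
            System.out.println("generating a balance plan");
        }
    }
}

With every imbalance at 0.0, the sketch prints the skip message, mirroring the behaviour logged here; lowering minCostNeedBalance or raising a cost function's multiplier (as the log message suggests) is what would make the comparison fail and a balance plan be generated instead.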
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-10T12:15:53,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,446 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,447 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,447 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,448 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,449 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
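The StochasticLoadBalancer message above names the threshold that gates balancing: hbase.master.balancer.stochastic.minCostNeedBalance, which is 1.0 in this run, so a weighted average imbalance of 0.0 never triggers a plan. A minimal sketch of how that threshold would be lowered on a real cluster, assuming the standard hbase-site.xml mechanism; the value 0.05 is an illustrative choice, not a recommended or default setting:

<property>
  <name>hbase.master.balancer.stochastic.minCostNeedBalance</name>
  <!-- Illustrative value only: lower than the 1.0 in effect in this test run,
       so that a weighted average imbalance above 0.05 would trigger plan generation. -->
  <value>0.05</value>
</property>

Raising the multiplier of an individual cost function is the other lever the message mentions; the functionCost list that follows shows the multipliers in effect, but the configuration keys for those multipliers vary by cost function and HBase version, so none are reproduced here.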
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,456 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23
2024-11-10T12:15:53,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24
[... 367 similar DEBUG entries omitted: "server N is on host N" for N = 25 through 391; timestamps advance from 12:15:53,456 to 12:15:53,461 ...]
2024-11-10T12:15:53,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392
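The DEBUG entries above come from the balancer's cluster-state construction: every server index is mapped to its own host index (a one-server-per-host topology), and the INFO entries that follow map every server to rack 0 (a single shared rack). As a rough, hypothetical sketch of how such dense server-to-host and server-to-rack index arrays can be derived from host and rack names (invented class and method names; this is not the actual BalancerClusterState implementation):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical sketch, not HBase code: build server -> host and server -> rack
// index arrays like the ones the log entries above describe.
public class ClusterTopologySketch {

    // Maps each name to a dense index (0, 1, 2, ...) in first-seen order.
    private static int[] indexByName(List<String> names) {
        int[] indexes = new int[names.size()];
        Map<String, Integer> seen = new HashMap<>();
        for (int i = 0; i < names.size(); i++) {
            Integer idx = seen.get(names.get(i));
            if (idx == null) {
                idx = seen.size();
                seen.put(names.get(i), idx);
            }
            indexes[i] = idx;
        }
        return indexes;
    }

    public static void main(String[] args) {
        // Mirror the topology in the log: one distinct host per server,
        // and a single rack shared by all servers.
        int servers = 393;
        List<String> hostPerServer = new ArrayList<>();
        List<String> rackPerServer = new ArrayList<>();
        for (int i = 0; i < servers; i++) {
            hostPerServer.add("host-" + i); // distinct host per server
            rackPerServer.add("rack-0");    // everyone on the same rack
        }

        int[] serverToHost = indexByName(hostPerServer);
        int[] serverToRack = indexByName(rackPerServer);

        // Two passes, matching the order of the log: hosts first, then racks.
        for (int s = 0; s < servers; s++) {
            System.out.println("server " + s + " is on host " + serverToHost[s]);
        }
        for (int s = 0; s < servers; s++) {
            System.out.println("server " + s + " is on rack " + serverToRack[s]);
        }
    }
}

With one host per server the host index degenerates to the server index, and with a single rack every server maps to rack 0, which is exactly the pattern reported in the surrounding log entries.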
2024-11-10T12:15:53,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-11-10T12:15:53,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0
[... 252 similar INFO entries omitted: "server N is on rack 0" for N = 2 through 253; timestamps advance from 12:15:53,461 to 12:15:53,464 ...]
2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0
2024-11-10T12:15:53,464 INFO [Time-limited test {}]
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
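The BalancerClusterState lines above (and the matching host-mapping lines further below) show the topology bookkeeping the balancer works from: every region server is given a small integer index, and that index is mapped onto a host and a rack; with a single rack in this test, every server lands on rack 0. The following is a minimal, hypothetical Java sketch of such index arrays; the class and field names are invented for illustration and are not the actual HBase BalancerClusterState internals.

```java
// Hypothetical sketch only -- invented names, not the HBase BalancerClusterState class.
// It mirrors what the surrounding log lines report: each region server gets an integer
// index, and parallel arrays record which host and which rack that index maps to.
public class ClusterTopologySketch {
    final int[] serverIndexToHostIndex;
    final int[] serverIndexToRackIndex;

    ClusterTopologySketch(int numServers) {
        serverIndexToHostIndex = new int[numServers];
        serverIndexToRackIndex = new int[numServers];
        for (int server = 0; server < numServers; server++) {
            serverIndexToHostIndex[server] = server; // one server per host in this test scenario
            serverIndexToRackIndex[server] = 0;      // a single rack, so every server is on rack 0
        }
    }

    public static void main(String[] args) {
        // 393 servers/hosts, matching the "number of hosts=393" summary logged below.
        ClusterTopologySketch cluster = new ClusterTopologySketch(393);
        System.out.println("server 392 is on rack " + cluster.serverIndexToRackIndex[392]);
        System.out.println("server 392 is on host " + cluster.serverIndexToHostIndex[392]);
    }
}
```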
2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,465 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-10T12:15:53,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:15:53,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-10T12:15:53,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:53,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-10T12:15:53,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,467 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,468 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,468 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,468 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
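The StochasticLoadBalancer message logged above for table35 spells out the skip decision: the weighted average of the per-cost-function imbalances listed in functionCost= was 0.0, which did not exceed the threshold of 1.0 taken from hbase.master.balancer.stochastic.minCostNeedBalance, so no balance plan was generated for that table. The sketch below only illustrates that kind of weighted-average-versus-threshold check using the multipliers and imbalances from the log; it is not the HBase implementation (whose exact weighting may differ), and all class and method names are invented.

```java
import java.util.List;

// Hypothetical sketch only -- invented names, not the HBase StochasticLoadBalancer code.
// It illustrates the decision described in the log: compute a weighted average of the
// per-cost-function imbalances and skip balancing when it stays at or below the
// configured minCostNeedBalance threshold (1.0 in the run logged above).
public class MinCostNeedBalanceSketch {

    /** One (multiplier, imbalance) pair as printed in the functionCost= line. */
    record CostTerm(String name, double multiplier, double imbalance) {}

    /** Weighted average imbalance: sum(multiplier * imbalance) / sum(multiplier). */
    static double weightedAverageImbalance(List<CostTerm> terms) {
        double weighted = 0.0, totalWeight = 0.0;
        for (CostTerm t : terms) {
            weighted += t.multiplier() * t.imbalance();
            totalWeight += t.multiplier();
        }
        return totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
    }

    public static void main(String[] args) {
        // Multipliers and imbalances copied from the functionCost= line for table35;
        // cost functions reported as "(not needed)" are left out.
        List<CostTerm> terms = List.of(
            new CostTerm("RegionCountSkewCostFunction", 500.0, 0.0),
            new CostTerm("MoveCostFunction", 7.0, 0.0),
            new CostTerm("RackLocalityCostFunction", 15.0, 0.0),
            new CostTerm("TableSkewCostFunction", 35.0, 0.0),
            new CostTerm("ReadRequestCostFunction", 5.0, 0.0),
            new CostTerm("WriteRequestCostFunction", 5.0, 0.0),
            new CostTerm("MemStoreSizeCostFunction", 5.0, 0.0),
            new CostTerm("StoreFileCostFunction", 5.0, 0.0));

        double minCostNeedBalance = 1.0; // threshold(1.0) from the log message above
        double imbalance = weightedAverageImbalance(terms);

        if (imbalance <= minCostNeedBalance) {
            System.out.printf("skipping load balancing: weighted average imbalance=%.1f <= threshold(%.1f)%n",
                imbalance, minCostNeedBalance);
        } else {
            System.out.println("would generate a balance plan");
        }
    }
}
```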
2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,470 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,476 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-10T12:15:53,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,477 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,478 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,478 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,479 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,481 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-10T12:15:53,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,488 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23
2024-11-10T12:15:53,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24
2024-11-10T12:15:53,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25
... (the same DEBUG entry repeats once per server, "server N is on host N", for N = 26 through 391; timestamps advance from 12:15:53,488 to 12:15:53,492) ...
2024-11-10T12:15:53,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392
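The DEBUG run above enumerates the synthetic topology this balancer test builds: BalancerClusterState(303) reports each server on its own host, and the INFO entries that follow, from BalancerClusterState(314), report every server on rack 0. As a small illustration only, and not part of the original test output, the sketch below shows one way to recover the server-to-host and server-to-rack maps from log lines of exactly this shape; the class name, the regular expressions, and reading the log from standard input are assumptions made for the example.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Illustrative sketch (not part of the test run): rebuilds the server->host and
 * server->rack assignments from BalancerClusterState log entries of the form
 * "BalancerClusterState(303): server 23 is on host 23" and
 * "BalancerClusterState(314): server 0 is on rack 0".
 */
public class BalancerTopologyLogParser {
  private static final Pattern HOST_ENTRY =
      Pattern.compile("BalancerClusterState\\(303\\): server (\\d+) is on host (\\d+)");
  private static final Pattern RACK_ENTRY =
      Pattern.compile("BalancerClusterState\\(314\\): server (\\d+) is on rack (\\d+)");

  public static void main(String[] args) throws Exception {
    Map<Integer, Integer> serverToHost = new LinkedHashMap<>();
    Map<Integer, Integer> serverToRack = new LinkedHashMap<>();
    try (java.io.BufferedReader in =
        new java.io.BufferedReader(new java.io.InputStreamReader(System.in))) {
      String line;
      while ((line = in.readLine()) != null) {
        // A single physical log line may contain several entries, so scan with find().
        Matcher h = HOST_ENTRY.matcher(line);
        while (h.find()) {
          serverToHost.put(Integer.parseInt(h.group(1)), Integer.parseInt(h.group(2)));
        }
        Matcher r = RACK_ENTRY.matcher(line);
        while (r.find()) {
          serverToRack.put(Integer.parseInt(r.group(1)), Integer.parseInt(r.group(2)));
        }
      }
    }
    System.out.println("servers mapped to hosts: " + serverToHost.size());
    System.out.println("distinct hosts seen: " + new java.util.HashSet<>(serverToHost.values()).size());
    System.out.println("servers mapped to racks: " + serverToRack.size());
    System.out.println("distinct racks seen: " + new java.util.HashSet<>(serverToRack.values()).size());
  }
}

For the portion of the log shown here, such a parse would report one distinct host per server and a single rack (rack 0).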
2024-11-10T12:15:53,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-11-10T12:15:53,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0
2024-11-10T12:15:53,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0
... (the same INFO entry repeats once per server, "server N is on rack 0", for N = 3 through 285; timestamps advance from 12:15:53,492 to 12:15:53,495) ...
2024-11-10T12:15:53,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0
2024-11-10T12:15:53,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-10T12:15:53,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,498 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,499 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,499 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,500 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,501 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,507 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:15:53,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-10T12:15:53,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
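[Editor's aside] The INFO line above summarizes how the skip decision is made: each cost function reports an imbalance, the per-function multipliers weight those imbalances, and the resulting weighted average is compared against hbase.master.balancer.stochastic.minCostNeedBalance. The following is a minimal, hypothetical Java sketch of that check only; the class and method names are invented, it assumes a multiplier-weighted average as one plausible reading of the message, and it is not HBase's actual implementation. The multipliers and imbalances are copied from the functionCost= line that follows.

// Hypothetical sketch, not HBase code: reproduces the "imbalance <= threshold => skip" decision.
import java.util.LinkedHashMap;
import java.util.Map;

public class MinCostNeedBalanceSketch {

    // Weighted average of per-function imbalances; weights are the multipliers from the log.
    static double weightedImbalance(Map<String, double[]> costs) {
        double weightedSum = 0.0;
        double weightTotal = 0.0;
        for (double[] multiplierAndImbalance : costs.values()) {
            weightedSum += multiplierAndImbalance[0] * multiplierAndImbalance[1];
            weightTotal += multiplierAndImbalance[0];
        }
        return weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;
    }

    public static void main(String[] args) {
        // {multiplier, imbalance} pairs from the functionCost= line; "(not needed)" functions omitted.
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        costs.put("MoveCostFunction", new double[] {7.0, 0.0});
        costs.put("RackLocalityCostFunction", new double[] {15.0, 0.0});
        costs.put("TableSkewCostFunction", new double[] {35.0, 0.0});
        costs.put("ReadRequestCostFunction", new double[] {5.0, 0.0});
        costs.put("WriteRequestCostFunction", new double[] {5.0, 0.0});
        costs.put("MemStoreSizeCostFunction", new double[] {5.0, 0.0});
        costs.put("StoreFileCostFunction", new double[] {5.0, 0.0});

        double minCostNeedBalance = 1.0; // threshold reported in the log line above
        double imbalance = weightedImbalance(costs);

        if (imbalance <= minCostNeedBalance) {
            System.out.printf("skipping load balancing: weighted average imbalance=%.1f <= threshold(%.1f)%n",
                imbalance, minCostNeedBalance);
        } else {
            System.out.println("would generate a balance plan");
        }
    }
}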
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,508 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,509 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,509 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,510 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
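
Note on the StochasticLoadBalancer(421) message above: each enabled cost function reports an imbalance, the balancer combines them into a weighted average, and a per-table balance plan is only generated when that average exceeds hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here, so with every imbalance at 0.0 the plan is skipped). Below is a minimal, illustrative sketch of that check under the assumption that the weighting uses each function's multiplier and that "(not needed)" functions are excluded; class, method, and variable names are hypothetical and this is not HBase's actual implementation.

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch of the "weighted average imbalance vs. minCostNeedBalance"
// decision reported in the log above. Not HBase code.
public class NeedsBalanceSketch {

  // Multiplier-weighted average of the per-cost-function imbalances (assumption).
  static double weightedAverageImbalance(Map<String, double[]> functionCost) {
    double weightedSum = 0.0;
    double totalWeight = 0.0;
    for (double[] mulAndImbalance : functionCost.values()) {
      double multiplier = mulAndImbalance[0];
      double imbalance = mulAndImbalance[1];
      weightedSum += multiplier * imbalance;
      totalWeight += multiplier;
    }
    return totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
  }

  public static void main(String[] args) {
    // Multipliers and imbalances copied from the functionCost summary above;
    // the "(not needed)" functions are simply left out.
    Map<String, double[]> functionCost = new LinkedHashMap<>();
    functionCost.put("RegionCountSkewCostFunction", new double[] { 500.0, 0.0 });
    functionCost.put("MoveCostFunction", new double[] { 7.0, 0.0 });
    functionCost.put("RackLocalityCostFunction", new double[] { 15.0, 0.0 });
    functionCost.put("TableSkewCostFunction", new double[] { 35.0, 0.0 });
    functionCost.put("ReadRequestCostFunction", new double[] { 5.0, 0.0 });
    functionCost.put("WriteRequestCostFunction", new double[] { 5.0, 0.0 });
    functionCost.put("MemStoreSizeCostFunction", new double[] { 5.0, 0.0 });
    functionCost.put("StoreFileCostFunction", new double[] { 5.0, 0.0 });

    double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
    double imbalance = weightedAverageImbalance(functionCost);

    // Mirrors the logged outcome: 0.0 <= 1.0, so balancing is skipped for the table.
    if (imbalance <= minCostNeedBalance) {
      System.out.printf("skipping: weighted average imbalance=%.1f <= threshold(%.1f)%n",
          imbalance, minCostNeedBalance);
    } else {
      System.out.println("would generate a balance plan");
    }
  }
}

Lowering the threshold or raising a multiplier would let the same inputs trigger balancing, which is exactly the tuning the log message suggests.
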
2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,512 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:15:53,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-10T12:15:53,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:53,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
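The StochasticLoadBalancer message above names two tuning knobs: the hbase.master.balancer.stochastic.minCostNeedBalance threshold (shown here at 1.0, so a weighted imbalance of 0.0 never triggers a plan) and the per-cost-function multipliers reported in the functionCost breakdown that follows. Below is a minimal, hedged sketch of how those settings could be supplied to an HBase Configuration: the minCostNeedBalance key is taken verbatim from the message, while the regionCountCost key is an assumption based on the usual hbase.master.balancer.stochastic.* naming and should be verified against the HBase version in use. In production these values would normally be set in hbase-site.xml on the master rather than in code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Lower the "needs balance" threshold so smaller imbalances produce a balance plan.
            // Key name taken from the log message above; 0.05 is only an illustrative value.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            // Or weight one cost function more heavily. This key is assumed from the
            // hbase.master.balancer.stochastic.* convention (RegionCountSkewCostFunction
            // shows multiplier=500.0 in the functionCost line); confirm it for your version.
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
            System.out.println("minCostNeedBalance = "
                + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }

Because the weighted average imbalance in this run is already 0.0, no setting would make the balancer move regions here; the sketch only shows where the knobs live.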
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-10T12:15:53,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,519 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,520 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,520 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,520 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,522 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,528 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-10T12:15:53,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-10T12:15:53,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-10T12:15:53,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-10T12:15:53,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-10T12:15:53,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-10T12:15:53,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-10T12:15:53,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-10T12:15:53,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-10T12:15:53,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-10T12:15:53,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-10T12:15:53,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:15:53,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-10T12:15:53,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:53,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
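Note: the skip decision logged above compares a weighted average of per-cost-function imbalances against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run); the functionCost breakdown immediately below lists each cost function's multiplier and imbalance. The following is a minimal illustrative sketch of that kind of check, not HBase's actual StochasticLoadBalancer code: the class and record names are hypothetical and the exact aggregation formula is an assumption, with values copied from the logged breakdown.

// Illustrative sketch only -- not the HBase StochasticLoadBalancer implementation.
// It mimics the decision logged above: compute a weighted average of per-cost-function
// imbalances and skip balancing when it does not exceed a minCostNeedBalance-style
// threshold. Class, record, and method names here are hypothetical.
import java.util.List;

public class NeedsBalanceCheck {

  /** A hypothetical (multiplier, imbalance) reading, e.g. RegionCountSkew: 500.0 / 0.0. */
  record CostFunctionReading(String name, double multiplier, double imbalance) {}

  /**
   * Assumed aggregation: sum(multiplier_i * imbalance_i) / sum(multiplier_i),
   * counting only cost functions that are in play (multiplier > 0).
   */
  static double weightedAverageImbalance(List<CostFunctionReading> readings) {
    double weightedSum = 0.0;
    double totalWeight = 0.0;
    for (CostFunctionReading r : readings) {
      if (r.multiplier() <= 0) {
        continue; // corresponds to "(not needed)" entries in the functionCost line
      }
      weightedSum += r.multiplier() * r.imbalance();
      totalWeight += r.multiplier();
    }
    return totalWeight == 0 ? 0.0 : weightedSum / totalWeight;
  }

  public static void main(String[] args) {
    // Multipliers and imbalances taken from the functionCost breakdown logged for table4.
    List<CostFunctionReading> readings = List.of(
        new CostFunctionReading("RegionCountSkewCostFunction", 500.0, 0.0),
        new CostFunctionReading("MoveCostFunction", 7.0, 0.0),
        new CostFunctionReading("RackLocalityCostFunction", 15.0, 0.0),
        new CostFunctionReading("TableSkewCostFunction", 35.0, 0.0),
        new CostFunctionReading("ReadRequestCostFunction", 5.0, 0.0),
        new CostFunctionReading("WriteRequestCostFunction", 5.0, 0.0),
        new CostFunctionReading("MemStoreSizeCostFunction", 5.0, 0.0),
        new CostFunctionReading("StoreFileCostFunction", 5.0, 0.0));

    double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
    double imbalance = weightedAverageImbalance(readings);
    if (imbalance <= minCostNeedBalance) {
      System.out.println("skipping load balancing: imbalance=" + imbalance
          + " <= threshold(" + minCostNeedBalance + ")");
    }
  }
}

With every imbalance at 0.0, the weighted average is 0.0, which matches why the log reports the table as already balanced and skips plan generation.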
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-10T12:15:53,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,529 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,530 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,530 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,531 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
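Note: the long run of "server N is on host N" (and earlier "server N is on rack 0") lines reflects how the balancer's cluster-state model assigns each server name a dense integer index and records which host and rack that index belongs to; in this test every server is its own host and all hosts share a single rack, hence the one-to-one numbering. Below is a small illustrative sketch of that indexing under those assumptions -- it is not the actual BalancerClusterState implementation, and all names in it are hypothetical.

// Minimal sketch (not HBase's BalancerClusterState) of the indexing described by the
// log lines above: server names get dense indices, and parallel lists record each
// server's host index and rack index.
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ClusterIndexSketch {
  final Map<String, Integer> serverToIndex = new HashMap<>();
  final List<Integer> serverIndexToHostIndex = new ArrayList<>();
  final List<Integer> serverIndexToRackIndex = new ArrayList<>();

  /** Registers a server under a host and rack, assigning dense indices in arrival order. */
  void addServer(String serverName, int hostIndex, int rackIndex) {
    Integer existing = serverToIndex.get(serverName);
    if (existing == null) {
      serverToIndex.put(serverName, serverToIndex.size());
      serverIndexToHostIndex.add(hostIndex);
      serverIndexToRackIndex.add(rackIndex);
    } else {
      serverIndexToHostIndex.set(existing, hostIndex);
      serverIndexToRackIndex.set(existing, rackIndex);
    }
  }

  public static void main(String[] args) {
    ClusterIndexSketch cluster = new ClusterIndexSketch();
    // 393 single-server hosts on one rack, mirroring the logged
    // "Number of tables=1, number of hosts=393, number of racks=1".
    for (int i = 0; i < 393; i++) {
      cluster.addServer("srv" + i, i, 0); // server i -> host i, rack 0
    }
    System.out.println("servers=" + cluster.serverToIndex.size() + ", racks=1");
  }
}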
2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,532 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
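The run of "server N is on host N" entries above (and the "server N is on rack 0" entries that follow) is the balancer indexing each of the 393 test servers: with no shared hosts and no rack topology in this test, every server becomes its own host and all servers end up on the single rack 0. A minimal, self-contained sketch of that indexing is below; the class and field names are illustrative only and are not the actual HBase BalancerClusterState internals.

// Illustrative sketch only -- not the real BalancerClusterState code.
// It reproduces what the surrounding log entries show: 393 servers, each
// getting its own host index, all sharing the single rack index 0.
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ClusterStateSketch {
  // server name -> dense index, like the "Hosts are {srv...=N, ...}" map logged later
  final Map<String, Integer> serverToIndex = new LinkedHashMap<>();
  final int[] serverIndexToHostIndex;
  final int[] serverIndexToRackIndex;

  ClusterStateSketch(List<String> servers) {
    for (String s : servers) {
      serverToIndex.put(s, serverToIndex.size());
    }
    serverIndexToHostIndex = new int[servers.size()];
    serverIndexToRackIndex = new int[servers.size()];
    for (int i = 0; i < servers.size(); i++) {
      serverIndexToHostIndex[i] = i; // "server i is on host i"
      serverIndexToRackIndex[i] = 0; // "server i is on rack 0"
    }
  }

  public static void main(String[] args) {
    List<String> servers = new ArrayList<>();
    for (int i = 0; i < 393; i++) {
      servers.add("srv" + i); // hypothetical names; the real test uses random ids
    }
    ClusterStateSketch cs = new ClusterStateSketch(servers);
    System.out.println("number of hosts=" + cs.serverIndexToHostIndex.length + ", number of racks=1");
  }
}

This matches the "number of hosts=393, number of racks=1" summary that the cluster state logs once the enumeration finishes.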
2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,538 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:15:53,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-10T12:15:53,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:53,538 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
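The "skipping load balancing" decision above is, roughly, a weighted-average check: each active cost function reports an imbalance, the balancer weights those imbalances by the configured multipliers (the functionCost breakdown that follows), and if the weighted average is at or below hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here) no balance plan is generated for the table. A small sketch of that check, using the multiplier and imbalance values from the breakdown below; the method and variable names are illustrative, not the StochasticLoadBalancer internals.

// Minimal sketch of the threshold check described in the log entry above.
public class NeedsBalanceSketch {
  static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
    double weighted = 0, totalMultiplier = 0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      totalMultiplier += multipliers[i];
    }
    return totalMultiplier == 0 ? 0 : weighted / totalMultiplier;
  }

  public static void main(String[] args) {
    // Active cost functions from the functionCost line below; every imbalance is 0.0 here.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
    double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
    double avg = weightedAverageImbalance(multipliers, imbalances);
    if (avg <= minCostNeedBalance) {
      System.out.println("skipping load balancing: weighted average imbalance=" + avg
          + " <= threshold(" + minCostNeedBalance + ")");
    }
  }
}

As the log message itself suggests, lowering hbase.master.balancer.stochastic.minCostNeedBalance (in hbase-site.xml) or raising a cost function's multiplier would make the balancer act on smaller imbalances.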
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-10T12:15:53,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,540 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,540 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,541 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,541 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,543 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,549 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:15:53,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-10T12:15:53,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-10T12:15:53,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,550 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,551 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,551 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,552 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,554 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
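The BalancerClusterState records above and below assign every server a dense integer index and map it to a host index and a rack index (a single rack 0 in this test). A minimal, self-contained sketch of that kind of server -> host -> rack index follows; the class and variable names are hypothetical and are not taken from HBase's internals, and the three server names are just examples reused from the host map logged further down.

import java.util.LinkedHashMap;
import java.util.Map;

// Sketch only: build dense host/rack indexes for a list of servers, the way the
// "server N is on host H / rack R" log records above suggest. Not HBase code.
public class ClusterIndexSketch {
  public static void main(String[] args) {
    // toy topology: three servers, each on its own host, all on one rack
    Map<String, String> serverToHostName = new LinkedHashMap<>();
    serverToHostName.put("srv1734689245", "host-a");
    serverToHostName.put("srv55552185", "host-b");
    serverToHostName.put("srv687285532", "host-c");

    Map<String, Integer> hostIds = new LinkedHashMap<>();
    Map<String, Integer> rackIds = new LinkedHashMap<>();
    int[] serverToHost = new int[serverToHostName.size()];
    int[] serverToRack = new int[serverToHostName.size()];

    int s = 0;
    for (Map.Entry<String, String> e : serverToHostName.entrySet()) {
      String host = e.getValue();
      String rack = "rack";                    // single default rack, as in this test
      Integer h = hostIds.get(host);
      if (h == null) { h = hostIds.size(); hostIds.put(host, h); }
      Integer r = rackIds.get(rack);
      if (r == null) { r = rackIds.size(); rackIds.put(rack, r); }
      serverToHost[s] = h;
      serverToRack[s] = r;
      System.out.println("server " + s + " is on host " + h + " and rack " + r);
      s++;
    }
  }
}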
2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,559 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-10T12:15:53,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:15:53,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-10T12:15:53,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:53,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-10T12:15:53,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,561 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,561 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,562 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,562 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
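The StochasticLoadBalancer record a few lines above skipped balancing for table1 because the weighted average imbalance (0.0) did not exceed the hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0 here). A minimal sketch of lowering that threshold programmatically follows; it assumes the standard Hadoop Configuration and HBaseConfiguration APIs are on the classpath, and the 0.025f value is purely illustrative. In a live cluster the same key would normally be set in hbase-site.xml on the master rather than in code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustration only: lower the balancer's "needs balancing" threshold so that
// smaller imbalances still produce a balance plan. The key is the one named in
// the log message above; the chosen value is an arbitrary example.
public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f);
    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}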
2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,564 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
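The functionCost breakdown logged above for table1 pairs each active cost function with a multiplier (RegionCountSkew 500.0, Move 7.0, RackLocality 15.0, TableSkew 35.0, and 5.0 each for the read request, write request, memstore size, and store file costs) and an imbalance of 0.0. The "weighted average imbalance" it reports behaves like a multiplier-weighted mean of those imbalances; the sketch below reproduces that arithmetic for the logged values as an illustration only, not as the balancer's exact code.

// Illustration only: multiplier-weighted mean of per-cost-function imbalances,
// using the multipliers from the functionCost line above. With every imbalance
// at 0.0 the result is 0.0, which stays below the 1.0 threshold, so balancing
// is skipped for that table.
public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0}; // from the log
    double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};
    double weighted = 0.0;
    double sum = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      sum += multipliers[i];
    }
    double weightedAverageImbalance = sum > 0 ? weighted / sum : 0.0;
    System.out.println("weighted average imbalance = " + weightedAverageImbalance);
    System.out.println("needs balancing against threshold 1.0? " + (weightedAverageImbalance > 1.0));
  }
}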
2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,569 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-10T12:15:53,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-10T12:15:53,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-10T12:15:53,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-10T12:15:53,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-10T12:15:53,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:15:53,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-10T12:15:53,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:53,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-10T12:15:53,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,571 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,572 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,572 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,573 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,574 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,580 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,580 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,581 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,582 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,583 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,583 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,585 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,590 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-10T12:15:53,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:15:53,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-10T12:15:53,591 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:53,591 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
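The skip decision above is a threshold check: the balancer compares a weighted average of the per-cost-function imbalances (the functionCost breakdown in the next record lists each multiplier and imbalance) against hbase.master.balancer.stochastic.minCostNeedBalance, which is set to 1.0 in this test run. The stand-alone Java sketch below illustrates that check under the assumption that the weighted average is the multiplier-weighted mean of the imbalances; the class name and structure are hypothetical and not taken from the HBase source, and only the multipliers, imbalances, and property name come from this log.

import java.util.LinkedHashMap;
import java.util.Map;

// Hypothetical sketch (not HBase source): reproduce the
// "weighted average imbalance <= threshold" check reported by
// StochasticLoadBalancer above, using the values from the functionCost record.
// Cost functions reported as "(not needed)" are omitted here.
public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        // cost function name -> {multiplier, imbalance}, as printed in the log
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        costs.put("MoveCostFunction",            new double[] {7.0,   0.0});
        costs.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        costs.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        costs.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        costs.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        costs.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        costs.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

        // weighted average imbalance = sum(multiplier_i * imbalance_i) / sum(multiplier_i)
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] mi : costs.values()) {
            weightedSum   += mi[0] * mi[1];
            multiplierSum += mi[0];
        }
        double weightedAvgImbalance = weightedSum / multiplierSum;

        // hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this test run)
        double minCostNeedBalance = 1.0;

        if (weightedAvgImbalance <= minCostNeedBalance) {
            System.out.printf(
                "skipping load balancing because weighted average imbalance=%s <= threshold(%s)%n",
                weightedAvgImbalance, minCostNeedBalance);
        } else {
            System.out.println("imbalance above threshold; the balancer would generate a plan");
        }
    }
}

As the message itself suggests, lowering hbase.master.balancer.stochastic.minCostNeedBalance below 1.0 (for example in hbase-site.xml) or raising the multiplier of a cost function whose imbalance is non-zero makes it more likely that the weighted average exceeds the threshold, in which case a balance plan is generated instead of being skipped.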
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,591 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-10T12:15:53,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,592 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,593 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,593 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,593 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,595 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,598 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,599 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,601 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:15:53,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-10T12:15:53,601 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
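[editor's note] The skip decision logged above can be illustrated with a small standalone sketch. This is not HBase's actual balancer code path; it simply combines the per-cost-function multipliers and imbalances reported in the functionCost line below into a weighted average (assuming weighted-average semantics) and compares it against the threshold, which in this run corresponds to hbase.master.balancer.stochastic.minCostNeedBalance=1.0. The class and method names (MinCostNeedBalanceSketch, weightedImbalance) are illustrative only.

// MinCostNeedBalanceSketch.java - minimal sketch of the "weighted average imbalance <= threshold" check.
public final class MinCostNeedBalanceSketch {

    // Assumption: weighted average = sum(multiplier * imbalance) / sum(multiplier).
    static double weightedImbalance(double[] multipliers, double[] imbalances) {
        double weighted = 0.0;
        double total = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weighted += multipliers[i] * imbalances[i];
            total += multipliers[i];
        }
        return total == 0.0 ? 0.0 : weighted / total;
    }

    public static void main(String[] args) {
        // Multipliers and imbalances taken from the functionCost line for table31;
        // cost functions reported as "(not needed)" are omitted.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
        double threshold = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance in this run

        double imbalance = weightedImbalance(multipliers, imbalances);
        // Matches the log: balancing is skipped because 0.0 <= 1.0.
        System.out.printf("weighted average imbalance=%.1f <= threshold(%.1f): skip=%b%n",
                imbalance, threshold, imbalance <= threshold);
    }
}

As the log message itself suggests, making the balancer act in this situation would mean lowering hbase.master.balancer.stochastic.minCostNeedBalance below 1.0 (or raising the multipliers of specific cost functions); the exact values are deployment-specific and not implied by this log.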
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,601 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,603 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,603 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,604 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,604 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,606 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,612 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,612 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
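[Editor's note, illustration only] The skip decision logged above compares a weighted average of the per-cost-function imbalances against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run). The sketch below is a minimal, hypothetical reconstruction of that comparison using the multipliers and imbalance values printed in the functionCost line that follows; it is not the actual StochasticLoadBalancer code, and the averaging formula (sum of multiplier x imbalance divided by sum of multipliers) is an assumption made for illustration. Cost functions reported as "(not needed)" are omitted here, assuming they contribute no weight.

// Hypothetical sketch, not HBase source: shows why balancing is skipped when the
// weighted average imbalance is at or below minCostNeedBalance.
public final class MinCostNeedBalanceSketch {

  // One entry per cost function as printed in the functionCost log line.
  record CostFunction(String name, double multiplier, double imbalance) {}

  // Assumed formula: sum(multiplier_i * imbalance_i) / sum(multiplier_i).
  static double weightedAverageImbalance(java.util.List<CostFunction> costs) {
    double weighted = 0.0;
    double totalMultiplier = 0.0;
    for (CostFunction c : costs) {
      weighted += c.multiplier() * c.imbalance();
      totalMultiplier += c.multiplier();
    }
    return totalMultiplier == 0.0 ? 0.0 : weighted / totalMultiplier;
  }

  public static void main(String[] args) {
    // Multipliers and imbalances copied from the functionCost line below.
    java.util.List<CostFunction> costs = java.util.List.of(
        new CostFunction("RegionCountSkewCostFunction", 500.0, 0.0),
        new CostFunction("MoveCostFunction", 7.0, 0.0),
        new CostFunction("RackLocalityCostFunction", 15.0, 0.0),
        new CostFunction("TableSkewCostFunction", 35.0, 0.0),
        new CostFunction("ReadRequestCostFunction", 5.0, 0.0),
        new CostFunction("WriteRequestCostFunction", 5.0, 0.0),
        new CostFunction("MemStoreSizeCostFunction", 5.0, 0.0),
        new CostFunction("StoreFileCostFunction", 5.0, 0.0));

    // hbase.master.balancer.stochastic.minCostNeedBalance, 1.0 in this test run.
    double minCostNeedBalance = 1.0;
    double imbalance = weightedAverageImbalance(costs);

    if (imbalance <= minCostNeedBalance) {
      // Matches the INFO line above: every imbalance is 0.0, so the table is skipped.
      System.out.printf("skipping load balancing: imbalance=%.1f <= threshold(%.1f)%n",
          imbalance, minCostNeedBalance);
    } else {
      System.out.println("would generate a balance plan");
    }
  }
}

With all reported imbalances at 0.0 the weighted average is 0.0, which is why table8 is skipped here and the balancer moves on to generating a plan for table32 in the entries that follow.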
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,612 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-10T12:15:53,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,613 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,614 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,614 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,615 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,616 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
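As an aside on the tuning advice in the StochasticLoadBalancer message above: the sketch below shows one way that threshold could be lowered from client-side code. The property key is taken verbatim from the log line; the 0.05 value, the class name, and the use of HBaseConfiguration are illustrative assumptions, not values taken from this test run (in practice the setting would normally live in hbase-site.xml on the master).

    // Sketch only: lowers the balancer's minCostNeedBalance threshold, as the
    // log message above suggests. Class name and chosen value are illustrative.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerThresholdSketch {
        public static void main(String[] args) {
            // Start from the default HBase configuration (hbase-site.xml etc.).
            Configuration conf = HBaseConfiguration.create();
            // Property key comes from the log line; 0.05 is an arbitrary example,
            // lower than the threshold of 1.0 this test runs with.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            System.out.println("minCostNeedBalance = "
                + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }

The alternative mentioned in the same message, raising the relative multiplier of a specific cost function, would be configured the same way through its own property rather than in code.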
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-10T12:15:53,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,624 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,624 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,625 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,625 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,627 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,629 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,632 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
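The skip decision above reports weighted average imbalance=0.0 against the threshold of 1.0 (hbase.master.balancer.stochastic.minCostNeedBalance); the functionCost breakdown that continues below shows every active cost function at imbalance=0.0, so any weighted combination of them is 0.0 and no balance plan is generated for table7. The Java sketch below is only an illustration of that arithmetic, under the assumption that the reported figure behaves like a multiplier-weighted average of the per-function imbalances; the class and method names are hypothetical and this is not the HBase implementation, though the multipliers and the config key are taken from the log record above.

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class MinCostNeedBalanceSketch {
        // Hypothetical helper: weighted average of per-function imbalances,
        // weighted by each cost function's multiplier (values from the log above).
        static double weightedAverageImbalance(Map<String, double[]> costs) {
            double weightedSum = 0.0;
            double multiplierSum = 0.0;
            for (double[] mi : costs.values()) {
                weightedSum += mi[0] * mi[1];   // multiplier * imbalance
                multiplierSum += mi[0];
            }
            return multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
        }

        public static void main(String[] args) {
            // {multiplier, imbalance} pairs as reported in the functionCost dump below;
            // "(not needed)" functions are omitted.
            Map<String, double[]> costs = new LinkedHashMap<>();
            costs.put("RegionCountSkewCostFunction", new double[]{500.0, 0.0});
            costs.put("MoveCostFunction",            new double[]{7.0,   0.0});
            costs.put("RackLocalityCostFunction",    new double[]{15.0,  0.0});
            costs.put("TableSkewCostFunction",       new double[]{35.0,  0.0});
            costs.put("ReadRequestCostFunction",     new double[]{5.0,   0.0});
            costs.put("WriteRequestCostFunction",    new double[]{5.0,   0.0});
            costs.put("MemStoreSizeCostFunction",    new double[]{5.0,   0.0});
            costs.put("StoreFileCostFunction",       new double[]{5.0,   0.0});

            // Default threshold named in the log: hbase.master.balancer.stochastic.minCostNeedBalance
            double minCostNeedBalance = 1.0;
            double imbalance = weightedAverageImbalance(costs);
            System.out.printf("weighted average imbalance=%.1f, threshold=%.1f, runBalancer=%b%n",
                    imbalance, minCostNeedBalance, imbalance > minCostNeedBalance);
        }
    }

With every imbalance at 0.0 the computed figure is 0.0 <= 1.0, matching the "skipping load balancing" outcome; lowering minCostNeedBalance below the computed value, or raising a specific cost function's multiplier enough to push the weighted figure above it, is exactly the tuning the log message suggests for more aggressive balancing.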
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-10T12:15:53,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,634 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,635 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,635 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,636 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,638 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
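[editorial note] The INFO entry above reports that balancing for table33 was skipped because the weighted average imbalance (0.0) did not exceed the hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0); the functionCost breakdown that follows in the next entry lists each cost function's multiplier and current imbalance. As a rough, hedged illustration only (not the StochasticLoadBalancer source), the decision amounts to comparing a multiplier-weighted average of per-cost-function imbalances against the configured threshold. The sketch below uses hypothetical (multiplier, imbalance) pairs taken from the log and the standard Hadoop Configuration API to show how a test or tuning exercise might lower that threshold, as the message suggests.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MinCostNeedBalanceSketch {

  // Weighted average of per-cost-function imbalances; the weights mirror the
  // multipliers printed in the functionCost entry (hypothetical helper, not HBase code).
  static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
    double weightedSum = 0.0;
    double totalWeight = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weightedSum += multipliers[i] * imbalances[i];
      totalWeight += multipliers[i];
    }
    return totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
  }

  public static void main(String[] args) {
    // Values copied from the functionCost line above: every imbalance is 0.0 in this run.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};
    double imbalance = weightedAverageImbalance(multipliers, imbalances);

    // Lowering the threshold makes the balancer act on smaller imbalances,
    // which is the first option the log message recommends.
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    float threshold = conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f);

    System.out.println("balance needed: " + (imbalance > threshold));
  }
}

With the values in this log every imbalance is 0.0, so no balance plan would be generated for table33 regardless of how low the threshold is set; the other lever the message mentions is raising the multiplier of whichever cost function matters most.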
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-10T12:15:53,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,645 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,646 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,646 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,646 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
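[editorial note] The DEBUG entries before and after this point map each server index to a host index (and, in the earlier run of entries, to rack 0), which is how the balancer's cluster-state model reduces topology to integer indices. The following is a minimal, self-contained sketch of that kind of index with made-up names and data, not the HBase BalancerClusterState implementation; in this test every server is its own host and all hosts share a single rack, so the printed lines mirror the log.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class TopologyIndexSketch {
  public static void main(String[] args) {
    // Hypothetical server names; in this test each server name doubles as its hostname.
    String[] servers = {"srvA", "srvB", "srvC"};
    Map<String, Integer> hostToIndex = new HashMap<>();
    List<Integer> serverIndexToHostIndex = new ArrayList<>();
    List<Integer> serverIndexToRackIndex = new ArrayList<>();

    for (int serverIndex = 0; serverIndex < servers.length; serverIndex++) {
      // Each distinct hostname gets the next free host index.
      int hostIndex = hostToIndex.computeIfAbsent(servers[serverIndex], h -> hostToIndex.size());
      serverIndexToHostIndex.add(hostIndex);
      serverIndexToRackIndex.add(0); // single rack, matching "racks are {rack=0}" above
      System.out.println("server " + serverIndex + " is on host " + hostIndex);
      System.out.println("server " + serverIndex + " is on rack 0");
    }
  }
}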
2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,648 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,652 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-10T12:15:53,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,656 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,657 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,657 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,657 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
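Editor's note on the decision logged above for table6 ("skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)" together with the functionCost breakdown): the sketch below is only a rough, standalone illustration of that check. It assumes the reported "weighted average imbalance" behaves like the multiplier-weighted mean of the per-function imbalances printed in the log; it is not the StochasticLoadBalancer implementation, and the class and method names are hypothetical stand-ins.

```java
// Illustrative sketch only: approximates the "weighted average imbalance" vs.
// minCostNeedBalance comparison reported in the log above, using the
// (multiplier, imbalance) pairs copied from the table6 functionCost line.
import java.util.LinkedHashMap;
import java.util.Map;

public class BalanceDecisionSketch {

    // Assumed formula: sum(multiplier_i * imbalance_i) / sum(multiplier_i).
    static double weightedAverageImbalance(Map<String, double[]> functionCost) {
        double weighted = 0.0, totalWeight = 0.0;
        for (double[] mi : functionCost.values()) {
            double multiplier = mi[0], imbalance = mi[1];
            weighted += multiplier * imbalance;
            totalWeight += multiplier;
        }
        return totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
    }

    public static void main(String[] args) {
        Map<String, double[]> functionCost = new LinkedHashMap<>();
        functionCost.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        functionCost.put("MoveCostFunction", new double[] {7.0, 0.0});
        functionCost.put("RackLocalityCostFunction", new double[] {15.0, 0.0});
        functionCost.put("TableSkewCostFunction", new double[] {35.0, 0.0});
        functionCost.put("ReadRequestCostFunction", new double[] {5.0, 0.0});
        functionCost.put("WriteRequestCostFunction", new double[] {5.0, 0.0});
        functionCost.put("MemStoreSizeCostFunction", new double[] {5.0, 0.0});
        functionCost.put("StoreFileCostFunction", new double[] {5.0, 0.0});

        // Threshold named in the log: hbase.master.balancer.stochastic.minCostNeedBalance = 1.0
        double minCostNeedBalance = 1.0;
        double imbalance = weightedAverageImbalance(functionCost);
        if (imbalance <= minCostNeedBalance) {
            System.out.printf("skipping load balancing: imbalance=%.1f <= threshold(%.1f)%n",
                imbalance, minCostNeedBalance);
        } else {
            System.out.println("would generate a balance plan");
        }
    }
}
```

With every logged imbalance equal to 0.0, the weighted average is 0.0, which matches why the balancer skips table6 and moves on to generating a plan for table34.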
2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,659 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
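Editor's note: the same INFO message suggests lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising the multiplier of a specific cost function for more aggressive balancing. The snippet below is a minimal sketch of supplying such overrides programmatically (for example in a test) via the standard HBase/Hadoop Configuration API; in a deployment these properties would normally live in hbase-site.xml. The 0.05 and 1000 values, and the regionCountCost key, are illustrative assumptions, not recommendations.

```java
// Sketch: overriding the balancer threshold and one cost-function multiplier
// named in the log message above. Values and the multiplier key are
// illustrative assumptions only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Lower the threshold so smaller weighted-average imbalances still trigger a plan.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Or weight region-count skew more heavily (logged default multiplier is 500.0).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}
```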
2024-11-10T12:15:53,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
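A hedged aside, not part of the test output: the knobs named in that INFO message are ordinary HBase configuration properties, so a test or cluster wanting more aggressive balancing could set them roughly as sketched below. The minCostNeedBalance key is quoted directly from the message; the regionCountCost key and both values are illustrative assumptions, to be checked against the HBase version in use.

// Illustrative sketch only: tune the StochasticLoadBalancer thresholds mentioned above.
// Property keys and values are assumptions for illustration, not taken from this log run.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static Configuration moreAggressiveBalancerConf() {
    Configuration conf = HBaseConfiguration.create();
    // Lower the "weighted average imbalance" threshold from the 1.0 used in this test,
    // so the balancer is willing to generate a plan for smaller imbalances.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Or raise the relative weight of one cost function, e.g. region-count skew
    // (logged below with multiplier=500.0); key name assumed from HBase defaults.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    return conf;
  }
}

The first setting lowers the bar the balancer compares the weighted average imbalance against; the second shifts that weighted average toward region-count skew, matching the two options the message itself suggests.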
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,667 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,667 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,668 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,668 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,670 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,675 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-10T12:15:53,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:15:53,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-10T12:15:53,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:53,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-10T12:15:53,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,677 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,678 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,678 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,678 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,680 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:15:53,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-10T12:15:53,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
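The StochasticLoadBalancer message above names the two tuning knobs it is referring to. Below is a minimal sketch of how an operator might apply them through the standard Hadoop/HBase Configuration API; the property keys and values are illustrative assumptions based on that message, not settings taken from this test run, and should be verified against the HBase version in use. The functionCost breakdown the message refers to continues right after this sketch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Hypothetical tuning sketch, not part of this test log.
    public class BalancerTuningSketch {
        public static void main(String[] args) {
            // Loads hbase-default.xml / hbase-site.xml from the classpath.
            Configuration conf = HBaseConfiguration.create();
            // Lower the threshold below which the balancer skips balancing
            // (this run used 1.0, so an imbalance of 0.0 was skipped).
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            // Or raise the relative weight of one cost function, e.g. region count skew
            // (reported as multiplier=500.0 in the functionCost line below).
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
            // Print back the threshold to confirm the override took effect.
            System.out.println(conf.getFloat(
                "hbase.master.balancer.stochastic.minCostNeedBalance", -1f));
        }
    }

In practice the same keys would simply be set in hbase-site.xml on the HMaster and picked up on restart; treat the exact key names as assumptions to check against the deployed HBase release.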
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-10T12:15:53,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,688 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,688 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,689 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,689 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
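The "skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)" decision logged above for table25 can be illustrated with a small stand-alone sketch. This is only an approximation, assuming the reported average is a multiplier-weighted mean of the per-function imbalance values from the functionCost line; the class and method names (MinCostNeedBalanceSketch, needsBalance) are hypothetical and are not the actual StochasticLoadBalancer API.

public class MinCostNeedBalanceSketch {

    // Weighted mean of per-cost-function imbalance, compared against
    // hbase.master.balancer.stochastic.minCostNeedBalance (assumed decision rule).
    static boolean needsBalance(double[] multipliers, double[] imbalances, double minCostNeedBalance) {
        double weighted = 0.0;
        double totalWeight = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weighted += multipliers[i] * imbalances[i];
            totalWeight += multipliers[i];
        }
        double average = totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
        return average > minCostNeedBalance;
    }

    public static void main(String[] args) {
        // Multipliers and imbalances as reported in the functionCost line above
        // (RegionCountSkew=500, Move=7, RackLocality=15, TableSkew=35, the remaining functions 5.0).
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
        double threshold = 1.0; // default minCostNeedBalance
        System.out.println("needs balance: " + needsBalance(multipliers, imbalances, threshold)); // false
    }
}

With every imbalance at 0.0 the weighted mean is 0.0, so the check fails and no plan is generated, which matches the log; lowering the threshold or raising a multiplier (as the log message suggests) makes the check easier to trip.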
2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,691 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
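The long runs of "server N is on host N" and "server N is on rack 0" entries come from BalancerClusterState assigning each server an integer host index and rack index. The sketch below shows one plausible way such indices are built, assuming one host name per server and a single shared rack as in this test; ClusterIndexSketch and its fields are illustrative only, not the real BalancerClusterState internals.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ClusterIndexSketch {

    final int[] serverToHost; // logged above as "server N is on host H"
    final int[] serverToRack; // logged above as "server N is on rack R"

    ClusterIndexSketch(List<String> servers, Map<String, String> hostOf, Map<String, String> rackOf) {
        Map<String, Integer> hostIndex = new HashMap<>();
        Map<String, Integer> rackIndex = new HashMap<>();
        serverToHost = new int[servers.size()];
        serverToRack = new int[servers.size()];
        for (int i = 0; i < servers.size(); i++) {
            String host = hostOf.get(servers.get(i));
            String rack = rackOf.get(servers.get(i));
            // Assign the next free index the first time a host or rack name is seen.
            Integer h = hostIndex.get(host);
            if (h == null) { h = hostIndex.size(); hostIndex.put(host, h); }
            Integer r = rackIndex.get(rack);
            if (r == null) { r = rackIndex.size(); rackIndex.put(rack, r); }
            serverToHost[i] = h;
            serverToRack[i] = r;
        }
    }

    public static void main(String[] args) {
        // In this test every server is its own host and all hosts share one rack,
        // so server i maps to host i and rack 0, matching the log output above.
        List<String> servers = new ArrayList<>();
        Map<String, String> hostOf = new HashMap<>();
        Map<String, String> rackOf = new HashMap<>();
        for (int i = 0; i < 5; i++) {
            String s = "srv" + i;
            servers.add(s);
            hostOf.put(s, "host" + i);
            rackOf.put(s, "rack");
        }
        ClusterIndexSketch idx = new ClusterIndexSketch(servers, hostOf, rackOf);
        for (int i = 0; i < servers.size(); i++) {
            System.out.println("server " + i + " is on host " + idx.serverToHost[i]
                + " and rack " + idx.serverToRack[i]);
        }
    }
}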
2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-10T12:15:53,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,698 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,699 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,699 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,700 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,701 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
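(Editor's note, not part of the captured log.) The DEBUG lines above from balancer.BalancerClusterState report a one-host-per-server layout for this test: every "server N is on host N" entry maps server index N to its own host index N, for servers 0 through 392. The sketch below is only an illustration of that mapping as implied by the log output; the class and field names (MockClusterTopology, serverIndexToHostIndex) are hypothetical stand-ins and are not the actual HBase BalancerClusterState internals.

// Minimal, self-contained sketch (assumption: 393 servers, indices 0..392 as seen in the log).
public class MockClusterTopology {
    final int[] serverIndexToHostIndex; // hypothetical name, mirrors the log's server->host report

    MockClusterTopology(int numServers) {
        serverIndexToHostIndex = new int[numServers];
        for (int server = 0; server < numServers; server++) {
            // One distinct host per server, matching the "server N is on host N" DEBUG lines.
            serverIndexToHostIndex[server] = server;
        }
    }

    public static void main(String[] args) {
        MockClusterTopology topology = new MockClusterTopology(393);
        for (int server = 0; server < topology.serverIndexToHostIndex.length; server++) {
            System.out.println("server " + server + " is on host "
                + topology.serverIndexToHostIndex[server]);
        }
    }
}

Running this prints the same "server N is on host N" sequence the balancer logs while building its cluster state for the test.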
2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
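The table27 summary above reports a weighted average imbalance of 0.0 against a threshold of 1.0, and the balancer's own hint is to lower hbase.master.balancer.stochastic.minCostNeedBalance or raise individual cost-function multipliers if more aggressive balancing is wanted. The following is a minimal Java sketch, not HBase's internal code, of how such a weighted average is presumably formed from the multipliers and imbalances in the functionCost breakdown that follows immediately below, and of how the threshold could be lowered through the standard Hadoop Configuration API; weightedAverageImbalance is a hypothetical helper, and only the minCostNeedBalance key is taken directly from the log.

import org.apache.hadoop.conf.Configuration;

// Sketch only: mirrors the "weighted average imbalance <= threshold" check logged above,
// assuming the average is the multiplier-weighted mean of the per-function imbalances.
public class BalancerThresholdSketch {

  // Hypothetical helper, not an HBase API.
  static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
    double total = 0.0;
    double sumMultiplier = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      if (multipliers[i] <= 0) {
        continue; // functions reported as "(not needed)" or disabled contribute nothing
      }
      total += multipliers[i] * imbalances[i];
      sumMultiplier += multipliers[i];
    }
    return sumMultiplier == 0 ? 0.0 : total / sumMultiplier;
  }

  public static void main(String[] args) {
    // Multiplier/imbalance pairs copied from the functionCost line for table27 below.
    double[] multipliers = { 500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0 };
    double[] imbalances  = {   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0 };
    System.out.println("weighted average imbalance = "
        + weightedAverageImbalance(multipliers, imbalances)); // 0.0, so no balance plan is produced

    // The hint in the log: lower the threshold to make balancing more aggressive.
    Configuration conf = new Configuration();
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f);
  }
}

Whether a given HBase release computes exactly this mean depends on the version; the point is only that a 0.0 imbalance under a 1.0 threshold guarantees that no plan is generated for table27.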
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-10T12:15:53,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,709 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,709 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,710 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,710 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,712 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
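The INFO record above explains why no balance plan was produced for table28: the weighted average imbalance across the enabled cost functions (0.0) did not exceed the hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0). As a rough illustration only, and not HBase's actual StochasticLoadBalancer code, the sketch below assumes the decision reduces to a multiplier-weighted average of the per-function imbalance values listed in the functionCost breakdown that follows; the class and method names here are hypothetical.

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch only: approximates the "weighted average imbalance <= threshold"
// check described in the log record above. The exact formula and these names are
// assumptions for illustration, not HBase's real implementation.
public class MinCostNeedBalanceSketch {

    /** Weighted average of per-cost-function imbalance, weighted by each multiplier. */
    static double weightedAverageImbalance(Map<String, double[]> functions) {
        double weightedSum = 0.0;
        double weightTotal = 0.0;
        for (double[] multiplierAndImbalance : functions.values()) {
            double multiplier = multiplierAndImbalance[0];
            double imbalance = multiplierAndImbalance[1];
            weightedSum += multiplier * imbalance;
            weightTotal += multiplier;
        }
        return weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;
    }

    public static void main(String[] args) {
        // {multiplier, imbalance} pairs copied from the functionCost breakdown logged for table28.
        Map<String, double[]> functions = new LinkedHashMap<>();
        functions.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        functions.put("MoveCostFunction", new double[] {7.0, 0.0});
        functions.put("RackLocalityCostFunction", new double[] {15.0, 0.0});
        functions.put("TableSkewCostFunction", new double[] {35.0, 0.0});
        functions.put("ReadRequestCostFunction", new double[] {5.0, 0.0});
        functions.put("WriteRequestCostFunction", new double[] {5.0, 0.0});
        functions.put("MemStoreSizeCostFunction", new double[] {5.0, 0.0});
        functions.put("StoreFileCostFunction", new double[] {5.0, 0.0});

        // Default threshold, as reported in the log:
        // hbase.master.balancer.stochastic.minCostNeedBalance = 1.0.
        double minCostNeedBalance = 1.0;

        double imbalance = weightedAverageImbalance(functions);
        if (imbalance <= minCostNeedBalance) {
            System.out.printf("skipping load balancing: imbalance=%.1f <= threshold(%.1f)%n",
                imbalance, minCostNeedBalance);
        } else {
            System.out.println("would generate a balance plan");
        }
    }
}

With these inputs the sketch prints the same "skipping" outcome as the log. Lowering the threshold (for example, setting hbase.master.balancer.stochastic.minCostNeedBalance to 0.05 in hbase-site.xml) or raising a cost function's multiplier would make the same inputs trigger balancing, which is the tuning advice the log message itself gives.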
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-10T12:15:53,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,720 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,721 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,721 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,721 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,723 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
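The skip message above names the hbase.master.balancer.stochastic.minCostNeedBalance property (1.0 in this run) as the threshold the weighted average imbalance is compared against. A minimal, illustrative sketch of lowering that threshold through the standard Hadoop/HBase Configuration API is shown below; the class name and the 0.05f value are hypothetical choices for the example, not taken from this run.

    // Hypothetical sketch: make the StochasticLoadBalancer more aggressive by
    // lowering the minCostNeedBalance threshold named in the log message above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuning {
        public static Configuration tunedConf() {
            Configuration conf = HBaseConfiguration.create();
            // In this run the threshold is 1.0, so a weighted average imbalance of 0.0
            // never triggers balancing; any value below the observed imbalance would.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            return conf;
        }
    }

The same property can instead be set in hbase-site.xml; the alternative the message suggests is raising the multiplier of a specific cost function so that function dominates the weighted average.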
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-10T12:15:53,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,731 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,732 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,732 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,732 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,735 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-10T12:15:53,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,742 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,743 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,743 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,744 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,745 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,752 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
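[editor's note, not part of the captured log] The preceding StochasticLoadBalancer record explains why balancing was skipped and names the tuning knob (hbase.master.balancer.stochastic.minCostNeedBalance, default 1.0, shown as threshold(1.0)). A minimal sketch of how that threshold and a cost-function multiplier could be lowered/raised via the standard Hadoop Configuration API follows; the regionCountCost property name is an assumption inferred from the RegionCountSkewCostFunction multiplier=500.0 shown in the functionCost line below, not something stated in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: how the balancer thresholds mentioned in the log record above
// might be tuned before starting a mini-cluster or master in a test.
public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Default is 1.0 (the "threshold(1.0)" in the log); lowering it makes the
    // StochasticLoadBalancer act on smaller weighted-average imbalances.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Alternatively, raise the relative multiplier of a specific cost function.
    // Property name assumed from the RegionCountSkewCostFunction entry
    // (multiplier=500.0) in the functionCost line below.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
  }
}
```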
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-10T12:15:53,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,753 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,754 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,754 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,755 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,757 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,762 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:15:53,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-10T12:15:53,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
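The StochasticLoadBalancer message above names the tuning knob it checks: the weighted average imbalance must exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run) before a balance plan is generated for the table. Below is a minimal, hypothetical Java sketch of lowering that threshold programmatically, assuming only the stock HBaseConfiguration/Configuration API; the 0.05 value is an arbitrary illustrative choice, and the same property can equally be set in hbase-site.xml.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MinCostNeedBalanceSketch {
  public static void main(String[] args) {
    // Start from the standard HBase configuration (picks up hbase-site.xml if present).
    Configuration conf = HBaseConfiguration.create();

    // Per the log message, the threshold in this run is 1.0; a lower value makes the
    // StochasticLoadBalancer act on smaller imbalances. 0.05f is an arbitrary example.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Confirm the effective setting.
    System.out.println("minCostNeedBalance = "
        + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}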
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-10T12:15:53,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1734689245=146, srv55552185=298, srv687285532=322, srv314620387=246, srv431298418=270, srv2093054278=213, srv749937340=334, srv1599836022=117, srv1497473735=96, srv1161798607=29, srv339523149=252, srv1381382277=81, srv1174063897=33, srv291126840=239, srv343660908=253, srv1010822486=4, srv1135684005=24, srv630244855=307, srv1730933026=144, srv2042194657=199, srv1335029994=73, srv1864900392=173, srv723938317=330, srv1828636429=167, srv1984315490=190, srv851098379=354, srv698046776=325, srv1217977027=46, srv165493628=126, srv1527790058=99, srv274701078=232, srv1504038425=97, srv2067442867=208, srv53810407=293, srv541547366=295, srv646610249=311, srv1870148105=174, srv19092224=181, srv649960609=313, srv643938163=310, srv1016069291=7, srv444537549=277, srv716304993=328, srv941095689=380, srv1321587986=68, srv823260919=348, srv1343875663=75, srv1872234270=175, srv701879776=326, srv1159042040=28, srv103776630=11, srv241563669=226, srv493484685=285, srv2057565064=206, srv1811766479=162, srv129919009=64, srv841363026=351, srv748063000=333, srv438230908=274, srv893426091=369, srv990661571=390, srv1168281939=30, srv1201258727=42, srv922219875=378, srv41367420=266, srv207519449=209, srv1715253026=140, srv210749610=216, srv122193410=48, srv983699019=386, srv497369504=287, srv61110982=305, srv249357434=228, srv812974896=344, srv1588791389=111, srv986786914=388, srv1413243229=85, srv850656477=353, srv827310801=350, srv1253329238=55, srv1249430212=54, srv1702502173=136, srv1981607148=189, srv2034811311=198, srv503905903=289, srv220532600=222, srv425801660=268, srv1773070629=155, srv1053012350=15, srv1770592114=153, srv280889280=235, srv588854495=302, srv1691873893=134, srv1275457490=59, srv53466593=292, srv1231263946=51, srv1832578491=168, srv1273167907=58, srv1000248154=0, srv2025114605=196, srv766712954=337, srv885574043=363, srv1595443485=114, srv58869910=301, srv1629175832=123, srv1182836456=36, srv764731724=336, srv1136940776=26, srv1347099362=76, srv1213179414=44, srv1732077681=145, srv1269161388=57, srv2051006739=202, srv819029475=345, srv1600997742=118, srv433670581=272, srv561708991=299, srv1136517158=25, srv678794423=319, srv962736476=383, srv893884593=370, srv1381161059=80, srv1308882814=65, srv383011775=260, srv592983118=303, srv27556041=233, srv877238485=360, srv269898261=231, srv1476299868=94, srv822106742=346, srv426909672=269, srv1704763839=137, srv1038332406=12, srv1571172359=107, srv242224253=227, srv313748728=245, srv1818279581=163, srv90457075=374, srv1674629867=131, srv120142472=43, srv1578446492=109, srv1069716923=16, srv1932916773=186, srv1595124777=113, srv76466209=335, srv767094005=338, srv1321320032=67, srv906910261=375, 
srv1017939364=8, srv1267820887=56, srv1415085233=86, srv17247658=142, srv797275236=342, srv771087770=339, srv1681617663=133, srv796003712=341, srv1150787653=27, srv517464108=290, srv36319079=256, srv316498508=248, srv1028722611=10, srv703252577=327, srv1462580105=90, srv1458777213=89, srv877047092=359, srv1726251191=143, srv296822818=241, srv1712621466=138, srv1193047945=39, srv322346539=250, srv1322399578=70, srv788945656=340, srv1609254376=120, srv1545026016=104, srv1597056079=116, srv1826722158=166, srv940907826=379, srv1928143300=184, srv974953891=385, srv1873469454=176, srv441054763=276, srv1456455957=88, srv1198213074=41, srv472420794=281, srv1835638183=169, srv730642039=331, srv1805730787=161, srv1172640160=31, srv30919585=242, srv1766353309=152, srv1955505921=187, srv387913134=262, srv1509210325=98, srv2056078302=204, srv1173061843=32, srv679034124=320, srv1465616426=92, srv1572802259=108, srv440259053=275, srv1771425629=154, srv1466814538=93, srv25732367=230, srv1322717426=71, srv60949983=304, srv620444359=306, srv1895847336=179, srv953075612=381, srv886658453=364, srv86767944=358, srv411753147=265, srv550147793=297, srv661516263=316, srv1547445292=105, srv46295533=279, srv2111266832=217, srv480573972=283, srv249659294=229, srv1758009344=150, srv388374483=263, srv334510727=251, srv1462796503=91, srv222677088=223, srv1661291474=127, srv102316208=9, srv1093822751=18, srv1540339905=103, srv1428611082=87, srv1801478405=160, srv1564076837=106, srv899225868=372, srv897648579=371, srv1924169407=183, srv418211176=267, srv893152720=368, srv1722665260=141, srv1187996206=38, srv134833780=77, srv660597752=315, srv1178500613=35, srv1612137588=121, srv226898853=224, srv460826337=278, srv494915375=286, srv1821987417=164, srv921574221=377, srv1742729106=147, srv95904324=382, srv1750790985=149, srv165106249=125, srv2006847657=193, srv232417609=225, srv1796136011=158, srv1961231374=188, srv1050613602=14, srv1216064861=45, srv290915795=238, srv1375977966=79, srv1530898456=101, srv1672884094=129, srv498582253=288, srv1750170824=148, srv891872277=366, srv208382315=211, srv652832021=314, srv353061315=255, srv1621454953=122, srv1225088102=50, srv889878670=365, srv550048709=296, srv1218063784=47, srv1838457552=170, srv345161901=254, srv1195461817=40, srv1294855153=63, srv2080612630=210, srv822549293=347, srv474065764=282, srv29236633=240, srv390562329=264, srv646706346=312, srv1099967528=22, srv1277475533=60, srv1662578055=128, srv2056855020=205, srv63161220=308, srv128313457=62, srv964404399=384, srv1532838781=102, srv1674228703=130, srv2105647128=215, srv1247318018=53, srv1410552814=84, srv899269344=373, srv992371073=391, srv170145178=135, srv2045168719=201, srv1081171173=17, srv1280865929=61, srv993006729=392, srv80592970=343, srv1889456437=177, srv1678251312=132, srv2087667240=212, srv2142211222=220, srv1389132559=82, srv882356927=361, srv1989489955=191, srv89248461=367, srv290082886=237, srv1097430727=20, srv283723244=236, srv57848448=300, srv2118067269=218, srv693744933=324, srv184017258=171, srv466261151=280, srv885030150=362, srv1096494900=19, srv2146185884=221, srv677623082=318, srv1798287256=159, srv433112941=271, srv825222547=349, srv374034486=258, srv1015303842=6, srv2093414334=214, srv160195247=119, srv280816132=234, srv1826501821=165, srv1238564657=52, srv113193014=23, srv691011961=323, srv867318611=357, srv1913622304=182, srv383155738=261, srv1530339386=100, srv2000773370=192, srv363671639=257, srv853261421=355, srv1497449008=95, srv434436993=273, srv1007301318=3, srv1932235733=185, 
srv2019965778=195, srv914627725=376, srv2026481078=197, srv30989043=243, srv377631637=259, srv310528377=244, srv1012794192=5, srv2018726825=194, srv2043700935=200, srv1390606734=83, srv1185753704=37, srv682825858=321, srv1321976799=69, srv1223712837=49, srv1847745097=172, srv1360354757=78, srv1759908402=151, srv986966487=389, srv1789173198=156, srv1644000104=124, srv639197286=309, srv2063856851=207, srv1713318920=139, srv865225224=356, srv316249964=247, srv741203282=332, srv2128770627=219, srv1331651409=72, srv721067023=329, srv1001519287=1, srv1596998787=115, srv319245694=249, srv1311067193=66, srv1003511544=2, srv1890973676=178, srv540874481=294, srv1897494973=180, srv1335500986=74, srv1097859359=21, srv484049893=284, srv179425391=157, srv2053375245=203, srv1174311556=34, srv985702792=387, srv1592197886=112, srv664802163=317, srv529843422=291, srv845795161=352, srv104536253=13, srv1583278123=110} racks are {rack=0} 2024-11-10T12:15:53,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:53,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:53,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:53,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:15:53,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:15:53,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:15:53,764 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 
54 is on host 54 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:15:53,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:15:53,765 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:15:53,765 DEBUG [Time-limited 
test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:15:53,765 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:15:53,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 
2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is 
on host 209 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 240 is on host 240 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-10T12:15:53,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:15:53,767 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 
2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:15:53,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is 
on host 363 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:15:53,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 
is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 
0 2024-11-10T12:15:53,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 
2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 
2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:15:53,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 
2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:15:53,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:15:53,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:15:53,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:15:53,773 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:15:53,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:15:53,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:15:53,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:15:53,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:15:53,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:15:53,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-10T12:15:53,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-10T12:15:53,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-10T12:15:53,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-10T12:15:53,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-10T12:15:53,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-10T12:15:53,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-10T12:15:53,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=-1.0 2024-11-10T12:15:53,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 577.0 etc. 2024-11-10T12:15:53,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:15:53,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
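The long run of "server N is on rack 0" entries above, together with the "Hosts are {srv...=0, ...} racks are {rack=0}" lines and the "Number of tables=1, number of hosts=393, number of racks=1" summary, is BalancerClusterState assigning every server, host, and rack a dense integer index before any cost is computed. The sketch below is a minimal illustration of that indexing idiom under the assumption that it is a simple first-seen ordering; the class and method names are invented for the example and are not the HBase API.

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Hypothetical illustration of dense index assignment, in the spirit of the
    // "Hosts are {...} racks are {rack=0}" and "server N is on host N / rack 0"
    // entries above; this is NOT the real BalancerClusterState code.
    final class ClusterIndexSketch {
      private final Map<String, Integer> hostIndex = new LinkedHashMap<>();
      private final Map<String, Integer> rackIndex = new LinkedHashMap<>();

      private static int denseIndex(Map<String, Integer> index, String name) {
        // The first time a name is seen it gets the next dense index (0, 1, 2, ...).
        return index.computeIfAbsent(name, k -> index.size());
      }

      void register(int serverId, String host, String rack) {
        int h = denseIndex(hostIndex, host);
        int r = denseIndex(rackIndex, rack);
        System.out.println("server " + serverId + " is on host " + h);
        System.out.println("server " + serverId + " is on rack " + r);
      }
    }

Because the mock clusters in this test give every server its own host and use a single rack name ("rack"), the host index mirrors the server index and every server reports rack 0, which is exactly the pattern in the hundreds of entries above.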
2024-11-10T12:15:53,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:15:53,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:15:53,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:15:53,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv308537151=1, srv1330483243=0} racks are {rack=0} 2024-11-10T12:15:53,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=2, number of racks=1 2024-11-10T12:15:53,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=3200 2024-11-10T12:15:53,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 34 ms to try 3200 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.006065857885615251. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:15:53,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv87050013=1, srv1630070226=0} racks are {rack=0} 2024-11-10T12:15:53,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=2, number of racks=1 2024-11-10T12:15:53,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
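Throughout these entries the reported "weighted average imbalance" is consistent with a multiplier-weighted mean of the per-function imbalances, using the multipliers from the "Loaded config" entry above (500 + 7 + 15 + 35 + 4 x 5 = 577, matching "sum of multiplier of cost functions = 577.0"); the functions reported as "(not needed)" appear to carry no weight here, since the remaining multipliers already sum to 577. For the two-server run just above, 500 x 1.0 / 577 is approximately 0.8665511265164645 before the move and 7 x 0.5 / 577 is approximately 0.006065857885615251 after it, matching the logged values. A minimal sketch of that computation, assuming this weighting is all there is to it (the actual StochasticLoadBalancer may differ in details):

    // Minimal sketch: multiplier-weighted average of per-cost-function imbalances,
    // consistent with the numbers in this log (sum of multipliers = 577.0).
    // Assumed helper, not the real HBase API.
    final class WeightedImbalanceSketch {
      static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
        double weighted = 0.0, total = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
          weighted += multipliers[i] * imbalances[i];
          total += multipliers[i];
        }
        return weighted / total;
      }

      public static void main(String[] args) {
        // Multipliers from the "Loaded config" entry: RegionCountSkew=500, Move=7,
        // RackLocality=15, TableSkew=35, Read/Write/MemStore/StoreFile=5 each.
        double[] m = {500, 7, 15, 35, 5, 5, 5, 5};
        double[] before = {1.0, 0, 0, 0, 0, 0, 0, 0};   // prints ~0.8665511265164645
        double[] after  = {0.0, 0.5, 0, 0, 0, 0, 0, 0}; // prints ~0.006065857885615251
        System.out.println(weightedAverageImbalance(m, before));
        System.out.println(weightedAverageImbalance(m, after));
      }
    }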
2024-11-10T12:15:53,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv470703272=0, srv587489411=1} racks are {rack=0} 2024-11-10T12:15:53,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=2, number of racks=1 2024-11-10T12:15:53,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:15:53,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2065311907=1, srv1812983501=0} racks are {rack=0} 2024-11-10T12:15:53,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=3, number of hosts=2, number of racks=1 2024-11-10T12:15:53,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:15:53,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1202316756=0, srv1910760336=1} racks are {rack=0} 2024-11-10T12:15:53,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=2, number of racks=1 2024-11-10T12:15:53,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.2888503755054882 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.33333333333333337); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-10T12:15:53,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv148060769=0, srv773758289=1} racks are {rack=0} 2024-11-10T12:15:53,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:53,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:15:53,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1626475419=0, srv641885495=1} racks are {rack=0} 2024-11-10T12:15:53,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-10T12:15:53,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:15:53,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv352688908=0, srv932923150=1} racks are {rack=0} 2024-11-10T12:15:53,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=10, number of hosts=2, number of racks=1 2024-11-10T12:15:53,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.6932409012131716 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.8); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
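The pairing of "skipping load balancing because weighted average imbalance=X <= threshold(1.0)" entries with "Running balancer because cluster has idle server(s)" entries shows the gate in front of the actual search: a plan is only computed when the weighted average imbalance exceeds hbase.master.balancer.stochastic.minCostNeedBalance (left at 1.0 in this test) or when some server holds no regions at all. In these entries every weighted average stays well below 1.0, so it is always the idle-server condition that triggers a run. A minimal sketch of that decision, assuming those two conditions are the whole story here (method and parameter names are illustrative, not the HBase API):

    // Illustrative gate only; names are assumptions, not real HBase methods.
    final class BalanceGateSketch {
      static boolean needsBalance(double weightedAverageImbalance,
                                  double minCostNeedBalance,
                                  boolean hasIdleServer) {
        if (hasIdleServer) {
          // "Running balancer because cluster has idle server(s)."
          return true;
        }
        // Otherwise: "skipping load balancing because weighted average
        // imbalance=X <= threshold(minCostNeedBalance)".
        return weightedAverageImbalance > minCostNeedBalance;
      }
    }

This matches the hint the log prints itself: lowering minCostNeedBalance from 1.0, or raising the multiplier of a specific cost function, is what would make the threshold branch fire instead of the idle-server one.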
2024-11-10T12:15:53,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv576387612=1, srv2131284243=0} racks are {rack=0} 2024-11-10T12:15:53,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1432, number of hosts=2, number of racks=1 2024-11-10T12:15:53,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.40878413881917497 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4717368961973279); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:15:53,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv973216258=1, srv1232351158=0} racks are {rack=0} 2024-11-10T12:15:53,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=53, number of hosts=2, number of racks=1 2024-11-10T12:15:53,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.034662045060658585 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.04000000000000001); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:15:53,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv419761547=2, srv1333319281=0, srv27049451=1} racks are {rack=0} 2024-11-10T12:15:53,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=3, number of racks=1 2024-11-10T12:15:53,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896258); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.5003035261608543, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896258); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=7200 2024-11-10T12:15:53,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. 
Computation took 43 ms to try 7200 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.5003035261608543 to a new imbalance of 0.004043905257076833. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.3333333333333333); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:15:53,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv900021329=2, srv1241846941=0, srv87238478=1} racks are {rack=0} 2024-11-10T12:15:53,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=3, number of hosts=3, number of racks=1 2024-11-10T12:15:53,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:53,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.25015176308042714 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.2886751345948129); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
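The same weighted-average arithmetic sketched earlier also reproduces the three-host run above: 500 x 0.5773502691896258 / 577 is approximately 0.5003035261608543 before the move and 7 x 0.3333333333333333 / 577 is approximately 0.004043905257076833 after it, matching the "Going from a computed imbalance of ... to a new imbalance of ..." values in the entry just above.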
2024-11-10T12:15:53,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1257907436=0, srv237385510=1, srv566672225=2} racks are {rack=0} 2024-11-10T12:15:53,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=3, number of racks=1 2024-11-10T12:15:53,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.33333333333333337); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.2888503755054882, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.33333333333333337); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=9600 2024-11-10T12:15:53,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 33 ms to try 9600 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.2888503755054882 to a new imbalance of 0.0030329289428076256. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.25); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:15:53,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2058821022=1, srv102353598=0, srv442112379=2} racks are {rack=0} 2024-11-10T12:15:53,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=3, number of hosts=3, number of racks=1 2024-11-10T12:15:53,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=7200 2024-11-10T12:15:53,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 14 ms to try 7200 different iterations. 
Found a solution that moves 2 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.008087810514153667. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.6666666666666666); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:15:53,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1848826167=1, srv310851421=2, srv1828603398=0} racks are {rack=0} 2024-11-10T12:15:53,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:53,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:53,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:53,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:53,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:53,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:53,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=3, number of racks=1 2024-11-10T12:15:53,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:53,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=9600 2024-11-10T12:15:54,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 17 ms to try 9600 different iterations. Found a solution that moves 2 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.006065857885615251. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-10T12:15:54,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2001735124=0, srv666395083=2, srv425075978=1} racks are {rack=0} 2024-11-10T12:15:54,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:54,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:54,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:54,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:54,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:54,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:54,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=20, number of hosts=3, number of racks=1 2024-11-10T12:15:54,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4871794871794873); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.42216593343109815, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4871794871794873); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=96000 2024-11-10T12:15:54,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 175 ms to try 96000 different iterations. Found a solution that moves 13 regions; Going from a computed imbalance of 0.42216593343109815 to a new imbalance of 0.003942807625649913. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.325); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:15:54,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv133505001=1, srv1603309946=2, srv1120051687=0, srv321179022=3} racks are {rack=0} 2024-11-10T12:15:54,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:54,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:54,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:54,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:54,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:54,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:54,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:54,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:54,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=3, number of hosts=4, number of racks=1 2024-11-10T12:15:54,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.29457175359290033); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.25526148491585815, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.29457175359290033); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=19200 2024-11-10T12:15:54,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 30 ms to try 19200 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.25526148491585815 to a new imbalance of 0.0020219526285384167. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.16666666666666666); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
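Every computedMaxSteps value logged so far (3200, 7200, 9600, 96000, 19200) is an exact multiple of the configured stepsPerRegion=800, which is consistent with the step budget being stepsPerRegion times the number of regions in the mock cluster, capped at maxSteps=1000000 since runMaxSteps=false. The log never prints the formula itself, so the reconstruction below is an assumption, not a quote of the HBase source:

    // Assumed reconstruction from the logged values (all multiples of stepsPerRegion=800);
    // treat this as a sketch, not the actual StochasticLoadBalancer code.
    final class MaxStepsSketch {
      static long computedMaxSteps(long numRegions, long stepsPerRegion, long maxSteps) {
        return Math.min(maxSteps, numRegions * stepsPerRegion); // e.g. 120 * 800 = 96000
      }
    }

Under that reading, the 175 ms run above tried 96000 iterations because it would correspond to a 120-region mock cluster, while the small two-, three-, and four-host clusters stay in the low thousands of steps.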
2024-11-10T12:15:54,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv968225984=3, srv1929250188=0, srv89688143=2, srv1935685903=1} racks are {rack=0} 2024-11-10T12:15:54,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:54,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:54,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:54,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:54,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:54,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:54,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:54,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:54,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=4, number of racks=1 2024-11-10T12:15:54,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=12800 2024-11-10T12:15:54,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 18 ms to try 12800 different iterations. Found a solution that moves 3 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.009098786828422877. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.75); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:15:54,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv802113061=2, srv265143103=1, srv1759436209=0, srv985378587=3} racks are {rack=0} 2024-11-10T12:15:54,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:54,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:54,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:54,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:54,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:54,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:54,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:54,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:54,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=5, number of hosts=4, number of racks=1 2024-11-10T12:15:54,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=16000 2024-11-10T12:15:54,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 23 ms to try 16000 different iterations. Found a solution that moves 3 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.007279029462738302. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.6); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
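The computedMaxSteps values in these plans (19200, 12800, 16000) are consistent with numRegions x stepsPerRegion x numServers, capped at a maxSteps ceiling, assuming the stepsPerRegion value in this run is 800 (an inference from the numbers, not something the log states) together with the cap of 1000000 reported in the WARN entries further down. A minimal sketch under those assumptions:

// Sketch only, under the assumptions stated above (stepsPerRegion=800, cap=1,000,000).
public class MaxStepsSketch {
  static final long STEPS_PER_REGION = 800L;
  static final long MAX_STEPS = 1_000_000L;

  static long computedMaxSteps(long numRegions, long numServers) {
    long calculated = numRegions * STEPS_PER_REGION * numServers;
    // When calculated exceeds the cap, the balancer logs the "larger than maxSteps"
    // warning seen later in this log and walks only MAX_STEPS iterations.
    return Math.min(MAX_STEPS, calculated);
  }

  public static void main(String[] args) {
    System.out.println(computedMaxSteps(6, 4)); // 19200, as in the first plan above
    System.out.println(computedMaxSteps(4, 4)); // 12800
    System.out.println(computedMaxSteps(5, 4)); // 16000, as in the plan just above
  }
}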
2024-11-10T12:15:54,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1834885018=3, srv1174771427=1, srv1663884562=2, srv1115827804=0} racks are {rack=0} 2024-11-10T12:15:54,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:54,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:54,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:54,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:54,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:54,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:54,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:54,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:54,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=4, number of racks=1 2024-11-10T12:15:54,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896257); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.5003035261608542, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896257); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=38400 2024-11-10T12:15:54,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 55 ms to try 38400 different iterations. Found a solution that moves 6 regions; Going from a computed imbalance of 0.5003035261608542 to a new imbalance of 0.006065857885615251. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:15:54,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv583496189=2, srv1781876480=1, srv946789454=3, srv1617263427=0} racks are {rack=0} 2024-11-10T12:15:54,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:54,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:54,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:54,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:54,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:54,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:54,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:54,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:54,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=4, number of racks=1 2024-11-10T12:15:54,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.7071067811865475); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.6127441778046339, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.7071067811865475); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=25600 2024-11-10T12:15:54,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 35 ms to try 25600 different iterations. Found a solution that moves 4 regions; Going from a computed imbalance of 0.6127441778046339 to a new imbalance of 0.006065857885615251. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-10T12:15:54,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2141009097=2, srv1349265970=0, srv1396225967=1, srv992086333=3} racks are {rack=0} 2024-11-10T12:15:54,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:54,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:54,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:54,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:54,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:54,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:54,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:54,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:54,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=4, number of racks=1 2024-11-10T12:15:54,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.7907604410896715); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.6852343510309111, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.7907604410896715); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=22400 2024-11-10T12:15:54,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 33 ms to try 22400 different iterations. Found a solution that moves 4 regions; Going from a computed imbalance of 0.6852343510309111 to a new imbalance of 0.006932409012131715. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5714285714285714); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:15:54,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1173984393=0, srv2119356789=2, srv1705602976=1, srv36397535=3} racks are {rack=0} 2024-11-10T12:15:54,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:54,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:54,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:54,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:54,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:54,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:54,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:54,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:54,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=4, number of racks=1 2024-11-10T12:15:54,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=19200 2024-11-10T12:15:54,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 28 ms to try 19200 different iterations. Found a solution that moves 4 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.008087810514153667. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.6666666666666666); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-10T12:15:54,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1927451785=1, srv2003351585=3, srv1986163543=2, srv1674468867=0} racks are {rack=0} 2024-11-10T12:15:54,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:54,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:54,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:54,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:54,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:54,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:54,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:54,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:54,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=7, number of hosts=4, number of racks=1 2024-11-10T12:15:54,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:54,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0962834585018294 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.11111111111111113); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-10T12:15:54,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1315859571=0, srv1442687614=1, srv371050533=3, srv1475179769=2} racks are {rack=0} 2024-11-10T12:15:54,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:54,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:54,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:54,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:54,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:54,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:54,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:54,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:54,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=8, number of hosts=4, number of racks=1 2024-11-10T12:15:54,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:54,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.17331022530329285 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.19999999999999996); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
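The two entries above show the other outcome: the weighted average imbalance (0.0962... and 0.1733...) stays at or below the threshold(1.0), so the run is skipped, and the message itself names the knob to tune, hbase.master.balancer.stochastic.minCostNeedBalance. Below is a minimal, hypothetical example of lowering it programmatically; the property key is quoted from the log, the value 0.05 is purely illustrative, and the same key can equally be set in hbase-site.xml.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustration only: lower the threshold named in the "skipping load balancing" message
// above so the balancer acts on smaller imbalances than the 1.0 used in this run.
public class MinCostNeedBalanceExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}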
2024-11-10T12:15:54,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1704871691=0, srv708661331=3, srv2037849986=1, srv411779454=2} racks are {rack=0} 2024-11-10T12:15:54,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:54,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:54,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:54,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:54,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:54,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:54,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:54,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:54,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=7, number of hosts=4, number of racks=1 2024-11-10T12:15:54,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=22400 2024-11-10T12:15:54,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 33 ms to try 22400 different iterations. Found a solution that moves 5 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.008665511265164644. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.7142857142857143); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:15:54,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv569803206=4, srv1043579727=0, srv2140967068=3, srv1221557367=1, srv2065519235=2} racks are {rack=0} 2024-11-10T12:15:54,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:54,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:54,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:54,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:54,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:54,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:54,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:54,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:54,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:54,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:54,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=5, number of racks=1 2024-11-10T12:15:54,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:15:54,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.22705408170595567 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.26202041028867284); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-10T12:15:54,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1922332582=2, srv665304523=4, srv1886224588=1, srv536187602=3, srv169490846=0, srv77931366=5} racks are {rack=0} 2024-11-10T12:15:54,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:15:54,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:15:54,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:15:54,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:15:54,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:15:54,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:15:54,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:15:54,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:15:54,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:15:54,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:15:54,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:15:54,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:15:54,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1500, number of hosts=6, number of racks=1 2024-11-10T12:15:54,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4440183710462697); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:15:54,513 WARN [Time-limited test {}] balancer.StochasticLoadBalancer(548): calculatedMaxSteps:14448000 for loadbalancer's stochastic walk is larger than maxSteps:1000000. Hence load balancing may not work well. 
Setting parameter "hbase.master.balancer.stochastic.runMaxSteps" to true can overcome this issue.(This config change does not require service restart) 2024-11-10T12:15:54,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.38476461962415054, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4440183710462697); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=1000000 2024-11-10T12:16:08,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 13553 ms to try 1000000 different iterations. Found a solution that moves 1019 regions; Going from a computed imbalance of 0.38476461962415054 to a new imbalance of 0.004107049292652452. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.33853820598006645); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:08,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
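The WARN above reports calculatedMaxSteps:14448000 against maxSteps:1000000, so the walk was capped at computedMaxSteps=1000000 and still took 13553 ms while moving 1019 regions. The message recommends hbase.master.balancer.stochastic.runMaxSteps; a minimal, hypothetical example of enabling it follows (the key is quoted from the log, everything else is illustrative, and per the log text the change does not require a service restart).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustration only: let the stochastic walk use its full calculated step budget
// instead of stopping at the 1,000,000-step cap reported in the WARN above.
public class RunMaxStepsExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", true);
    System.out.println(conf.getBoolean("hbase.master.balancer.stochastic.runMaxSteps", false));
  }
}

Running the full 14,448,000 steps would likely lengthen the balancer run well beyond the 13.5 s seen here, so this is a run-time versus plan-quality trade-off; other time limits in the balancer may still cap the walk.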
2024-11-10T12:16:08,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv443738410=3, srv548534430=4, srv1747111509=1, srv1012057550=0, srv1798976275=2, srv822169934=5} racks are {rack=0} 2024-11-10T12:16:08,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:16:08,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:16:08,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:16:08,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:16:08,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:16:08,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:16:08,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:16:08,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:16:08,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:16:08,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:16:08,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:16:08,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:16:08,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1500, number of hosts=6, number of racks=1 2024-11-10T12:16:08,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.3438084096789955); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:08,123 WARN [Time-limited test {}] balancer.StochasticLoadBalancer(548): calculatedMaxSteps:16800000 for loadbalancer's stochastic walk is larger than maxSteps:1000000. Hence load balancing may not work well. 
Setting parameter "hbase.master.balancer.stochastic.runMaxSteps" to true can overcome this issue.(This config change does not require service restart) 2024-11-10T12:16:08,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.2979275647131677, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.3438084096789955); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=1000000 2024-11-10T12:16:22,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 13944 ms to try 1000000 different iterations. Found a solution that moves 916 regions; Going from a computed imbalance of 0.2979275647131677 to a new imbalance of 0.003175043327556326. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.26171428571428573); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:22,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
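In both of the large plans above, the post-plan MoveCostFunction imbalance matches the ratio of moved regions to the total region count implied by the step budget: 1019 / 3010 ≈ 0.33853820598006645 and 916 / 3500 ≈ 0.26171428571428573, where 3010 and 3500 are inferred by dividing calculatedMaxSteps by 800 x 6 servers and are not logged directly. A tiny check of that arithmetic, not HBase code:

// Sketch only: verify the apparent movedRegions / totalRegions relationship using the
// inferred region totals described above.
public class MoveCostRatioCheck {
  public static void main(String[] args) {
    System.out.println(1019.0 / 3010.0); // ~0.3385382..., as in the earlier plan
    System.out.println(916.0 / 3500.0);  // ~0.2617142..., as in the plan above
  }
}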
2024-11-10T12:16:22,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1082470954=2, srv559529370=12, srv134080700=5, srv1126390929=3, srv14711722=6, srv419409145=11, srv929726029=13, srv1050594785=0, srv1848339559=7, srv1997845219=9, srv241405503=10, srv1926690205=8, srv976843813=14, srv105115445=1, srv1306198328=4} racks are {rack=0} 2024-11-10T12:16:22,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:16:22,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:16:22,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:16:22,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:16:22,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:16:22,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:16:22,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:16:22,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:16:22,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:16:22,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:16:22,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:16:22,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:16:22,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:16:22,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:16:22,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:16:22,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:16:22,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:16:22,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:16:22,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:16:22,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:16:22,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:16:22,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:16:22,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:16:22,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:16:22,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:16:22,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:16:22,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:16:22,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:16:22,067 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:16:22,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:16:22,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=15, number of hosts=15, number of racks=1 2024-11-10T12:16:22,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:16:22,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.12507588154021357 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.14433756729740646); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:22,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:16:22,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv564419677=7, srv1897059705=1, srv1914774323=2, srv528318636=5, srv1961498889=3, srv554842918=6, srv470251467=4, srv876827623=9, srv1395969809=0, srv738628505=8} racks are {rack=0} 2024-11-10T12:16:22,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:16:22,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:16:22,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:16:22,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:16:22,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:16:22,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:16:22,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:16:22,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:16:22,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:16:22,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:16:22,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:16:22,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:16:22,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:16:22,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:16:22,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on 
rack 0 2024-11-10T12:16:22,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:16:22,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:16:22,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:16:22,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:16:22,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:16:22,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=10, number of hosts=10, number of racks=1 2024-11-10T12:16:22,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:22,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=80000 2024-11-10T12:16:22,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 121 ms to try 80000 different iterations. Found a solution that moves 9 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.010918544194107453. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.9); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:22,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-10T12:16:22,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1781027405=2, srv1944655084=4, srv1817640970=3, srv281297037=8, srv1950197770=5, srv2094423962=7, srv163071208=0, srv1771369260=1, srv2043259142=6, srv493997912=9} racks are {rack=0} 2024-11-10T12:16:22,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:16:22,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:16:22,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:16:22,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:16:22,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:16:22,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:16:22,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:16:22,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:16:22,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:16:22,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:16:22,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:16:22,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:16:22,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:16:22,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:16:22,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:16:22,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:16:22,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:16:22,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:16:22,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:16:22,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:16:22,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=10, number of racks=1 2024-11-10T12:16:22,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:16:22,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.055531997651093117 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.06408392528936147); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:22,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:16:22,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1703820788=4, srv1220166924=2, srv1909374059=5, srv741217436=7, srv1502614444=3, srv60424697=6, srv1067753680=0, srv1108606644=1, srv830459163=8, srv838392002=9} racks are {rack=0} 2024-11-10T12:16:22,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:16:22,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:16:22,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:16:22,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:16:22,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:16:22,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:16:22,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:16:22,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:16:22,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:16:22,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:16:22,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:16:22,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:16:22,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:16:22,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:16:22,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:16:22,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:16:22,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:16:22,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:16:22,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:16:22,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:16:22,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=54, number of hosts=10, number of racks=1 2024-11-10T12:16:22,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9999999999999999); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:22,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164644, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9999999999999999); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=432000 2024-11-10T12:16:23,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 831 ms to try 432000 different iterations. Found a solution that moves 48 regions; Going from a computed imbalance of 0.8665511265164644 to a new imbalance of 0.01078374735220489. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.8888888888888888); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:23,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-10T12:16:23,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1123055589=0, srv198455292=6, srv261334221=7, srv159006210=2, srv648209789=9, srv1688654006=3, srv1817963952=4, srv1862171452=5, srv316018045=8, srv1517052588=1} racks are {rack=0} 2024-11-10T12:16:23,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:16:23,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:16:23,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:16:23,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:16:23,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:16:23,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:16:23,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:16:23,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:16:23,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:16:23,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:16:23,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:16:23,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:16:23,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:16:23,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:16:23,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:16:23,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:16:23,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:16:23,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:16:23,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:16:23,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:16:23,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=55, number of hosts=10, number of racks=1 2024-11-10T12:16:23,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:23,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=440000 2024-11-10T12:16:23,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 915 ms to try 440000 different iterations. Found a solution that moves 49 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.010808255868914448. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.8909090909090909); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:23,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-10T12:16:23,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv263049843=5, srv883188843=9, srv1567804728=1, srv470320468=6, srv1742435567=2, srv21610250=4, srv704016319=7, srv1917575376=3, srv765375604=8, srv1157527128=0} racks are {rack=0} 2024-11-10T12:16:23,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:16:23,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:16:23,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:16:23,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:16:23,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:16:23,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:16:23,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:16:23,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:16:23,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:16:23,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:16:23,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:16:23,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:16:23,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:16:23,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:16:23,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:16:23,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:16:23,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:16:23,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:16:23,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:16:23,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:16:23,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=56, number of hosts=10, number of racks=1 2024-11-10T12:16:23,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:23,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=448000 2024-11-10T12:16:24,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 890 ms to try 448000 different iterations. Found a solution that moves 50 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.010831889081455806. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.8928571428571429); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:24,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-10T12:16:24,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv678316787=7, srv1959168618=3, srv454201470=6, srv749693419=8, srv970818087=9, srv1812184370=2, srv1026407733=0, srv2025184546=4, srv1730780521=1, srv2092150852=5} racks are {rack=0} 2024-11-10T12:16:24,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:16:24,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:16:24,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:16:24,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:16:24,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:16:24,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:16:24,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:16:24,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:16:24,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:16:24,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:16:24,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:16:24,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:16:24,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:16:24,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:16:24,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:16:24,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:16:24,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:16:24,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:16:24,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:16:24,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:16:24,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=16, number of hosts=10, number of racks=1 2024-11-10T12:16:24,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:24,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=128000 2024-11-10T12:16:25,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 211 ms to try 128000 different iterations. Found a solution that moves 14 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.01061525129982669. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.875); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv364162434=6, srv1506033760=2, srv1969609615=4, srv659619600=7, srv1103794235=0, srv1139949285=1, srv1906360560=3, srv312723323=5, srv93043940=9, srv711083426=8} racks are {rack=0} 2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:16:25,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:16:25,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:16:25,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:16:25,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:16:25,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:16:25,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:16:25,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:16:25,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:16:25,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:16:25,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=8, number of hosts=10, number of racks=1 2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:16:25,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.30649131741006164 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.35369098029121115); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv707053245=6, srv786447174=7, srv1281372121=1, srv1499344217=3, srv1071394917=0, srv693951835=5, srv136097426=2, srv548459881=4, srv949578032=9, srv93938322=8} racks are {rack=0} 2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:16:25,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=9, number of hosts=10, number of racks=1 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
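The "skipping load balancing" records around here point at hbase.master.balancer.stochastic.minCostNeedBalance as the threshold to lower for more aggressive balancing. Below is a minimal sketch of setting that property through the standard HBase client Configuration API; in practice it would normally go into hbase-site.xml on the HMaster, and the value 0.05 is an arbitrary illustrative choice, not something taken from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch only: lower the threshold named in the balancer log messages so that smaller
    // weighted average imbalances still trigger a balance run. 0.05f is an illustrative value.
    public class LowerMinCostNeedBalance {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            System.out.println("minCostNeedBalance = "
                + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }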
2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.34662045060658575 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.39999999999999997); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv770059678=9, srv1364769527=2, srv427346747=6, srv176011369=5, srv1047908761=0, srv1481574748=4, srv1352977481=1, srv668420503=8, srv1389964033=3, srv621664136=7} racks are {rack=0} 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 
2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=10, number of hosts=10, number of racks=1 2024-11-10T12:16:25,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:16:25,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.3851338340073176 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.44444444444444453); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:25,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:16:25,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv116654067=0, srv1400918395=2, srv174682433=4, srv1546944301=3, srv688425897=5, srv821826370=7, srv962190462=9, srv86240557=8, srv1194011380=1, srv705862614=6} racks are {rack=0} 2024-11-10T12:16:25,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:16:25,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:16:25,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:16:25,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:16:25,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:16:25,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:16:25,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:16:25,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:16:25,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:16:25,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:16:25,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:16:25,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:16:25,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:16:25,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:16:25,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:16:25,046 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:16:25,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:16:25,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:16:25,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:16:25,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:16:25,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=123, number of hosts=10, number of racks=1 2024-11-10T12:16:25,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:16:25,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.8002334382626535 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.923469387755102); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:25,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-10T12:16:25,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1769386360=6, srv487918751=8, srv968787542=9, srv1303739770=3, srv1155526094=0, srv1447026693=5, srv1850645645=7, srv1218961017=2, srv1428052492=4, srv1193117576=1} racks are {rack=0} 2024-11-10T12:16:25,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:16:25,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:16:25,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:16:25,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:16:25,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:16:25,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:16:25,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:16:25,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:16:25,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:16:25,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:16:25,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:16:25,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:16:25,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:16:25,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:16:25,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:16:25,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:16:25,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:16:25,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:16:25,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:16:25,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:16:25,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=155, number of hosts=10, number of racks=1 2024-11-10T12:16:25,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:16:25,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.8131812243798632 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9384111329343621); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:25,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:16:25,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv274016871=5, srv778035299=7, srv1923450164=3, srv356002529=6, srv1749754911=1, srv1775745442=2, srv1353006238=0, srv207865821=4} racks are {rack=0} 2024-11-10T12:16:25,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:16:25,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:16:25,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:16:25,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:16:25,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:16:25,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:16:25,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:16:25,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:16:25,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:16:25,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:16:25,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:16:25,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:16:25,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:16:25,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:16:25,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:16:25,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:16:25,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=14, number of hosts=8, number of racks=1 2024-11-10T12:16:25,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:16:25,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.05755254949858986 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0664156421213727); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:25,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:16:25,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv79597018=7, srv2056947079=4, srv1180252936=0, srv2018380361=2, srv460414286=6, srv1877555499=1, srv406552227=5, srv2030121053=3} racks are {rack=0} 2024-11-10T12:16:25,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:16:25,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:16:25,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:16:25,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:16:25,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:16:25,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:16:25,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:16:25,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:16:25,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:16:25,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:16:25,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:16:25,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:16:25,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:16:25,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:16:25,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:16:25,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:16:25,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=14, number of hosts=8, number of racks=1 2024-11-10T12:16:25,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:16:25,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.06673965003400768 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.07701755613924488); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:25,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:16:25,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1866966901=4, srv1497016133=2, srv1867757279=5, srv1298462940=0, srv519358653=6, srv1659330832=3, srv1378557314=1, srv984740842=7} racks are {rack=0} 2024-11-10T12:16:25,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:16:25,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:16:25,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:16:25,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:16:25,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:16:25,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:16:25,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:16:25,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:16:25,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:16:25,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:16:25,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:16:25,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:16:25,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:16:25,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:16:25,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:16:25,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:16:25,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=130, number of hosts=8, number of racks=1 2024-11-10T12:16:25,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:16:25,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.28093705674099306 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.32420136347910594); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:25,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:16:25,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1456760613=2, srv280354303=5, srv1043157412=0, srv1280667380=1, srv1955706018=3, srv2091254830=4, srv357573681=6, srv45439667=7} racks are {rack=0} 2024-11-10T12:16:25,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:16:25,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:16:25,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:16:25,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:16:25,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:16:25,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:16:25,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:16:25,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:16:25,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:16:25,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:16:25,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:16:25,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:16:25,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:16:25,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:16:25,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:16:25,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:16:25,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=140, number of hosts=8, number of racks=1 2024-11-10T12:16:25,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-10T12:16:25,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.07533492111851356 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.08693649897076465); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:25,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-10T12:16:25,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv586911106=3, srv1825670386=1, srv960689048=4, srv1124785198=0, srv2055506820=2} racks are {rack=0} 2024-11-10T12:16:25,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:16:25,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:16:25,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:16:25,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:16:25,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:16:25,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:16:25,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:16:25,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:16:25,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:16:25,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:16:25,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=5, number of hosts=5, number of racks=1 2024-11-10T12:16:25,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.25); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:25,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.21663778162911612, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.25); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=80000 2024-11-10T12:16:25,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 115 ms to try 80000 different iterations. Found a solution that moves 4 regions; Going from a computed imbalance of 0.21663778162911612 to a new imbalance of 0.0024263431542461008. functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.2); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-10T12:16:25,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster.
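The imbalance figures in that run can be reproduced from the functionCost breakdown. A worked sketch, assuming the weighted average imbalance is the multiplier-weighted sum of the per-function imbalances divided by the sum of the multipliers of the functions in play (the "(not needed)" functions excluded); all numbers come from the log entries above:

# Multipliers of the cost functions reported as active in the log above;
# the "(not needed)" functions are assumed to be excluded from the weighting.
multipliers = {
    "RegionCountSkew": 500.0, "Move": 7.0, "RackLocality": 15.0, "TableSkew": 35.0,
    "ReadRequest": 5.0, "WriteRequest": 5.0, "MemStoreSize": 5.0, "StoreFile": 5.0,
}
total_multiplier = sum(multipliers.values())  # 577.0

def weighted_imbalance(imbalances: dict) -> float:
    # Functions not listed are treated as having imbalance 0.0.
    return sum(multipliers[name] * value for name, value in imbalances.items()) / total_multiplier

# Initial state: only RegionCountSkewCostFunction is imbalanced (0.25).
print(weighted_imbalance({"RegionCountSkew": 0.25}))  # ~0.21663778162911612, the logged initial imbalance
# After moving 4 regions: only MoveCostFunction is non-zero (0.2).
print(weighted_imbalance({"Move": 0.2}))              # ~0.0024263, matching the logged new imbalance

This also explains the tuning advice in the earlier skip message: raising a function's multiplier increases its share of the weighted average, making that kind of imbalance more likely to cross the minCostNeedBalance threshold.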
2024-11-10T12:16:25,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv59046593=324, srv1073006223=20, srv1145305990=36, srv268313338=262, srv76104172=353, srv971474008=387, srv1023568060=6, srv1058945942=15, srv30394909=269, srv1151862092=38, srv792514649=359, srv1163142617=39, srv480829980=301, srv123746899=55, srv701374662=343, srv1928456444=205, srv646738632=335, srv1754981379=172, srv59553650=326, srv1390101314=86, srv1323169470=73, srv114088462=35, srv917200674=377, srv341153330=278, srv142804410=101, srv1520873213=119, srv25861257=258, srv1399285935=88, srv1901048001=199, srv1414423739=95, srv564784044=320, srv757635187=352, srv1596663260=133, srv1094612011=27, srv1800428396=179, srv2050647005=225, srv1175498099=43, srv1407049889=92, srv1730569431=166, srv2085943717=234, srv577086065=322, srv1436814124=103, srv14790229=109, srv189239101=198, srv1536028541=122, srv761434243=354, srv5432970=313, srv117284150=42, srv383781214=285, srv42442515=290, srv1225513094=53, srv1829935360=187, srv1124683774=31, srv247635257=254, srv102033475=4, srv1821236868=184, srv316081566=271, srv555734499=316, srv1093366069=25, srv366916572=283, srv1628047623=142, srv1652320859=148, srv319751669=272, srv1327957628=75, srv1070247301=19, srv1979410442=216, srv1250083393=60, srv1263384388=63, srv1367887316=80, srv1225429315=52, srv471240368=300, srv863983804=370, srv512117669=306, srv143175480=102, srv291030461=266, srv105420500=14, srv1423503071=99, srv1640631639=146, srv74077438=350, srv1873590524=193, srv1486907725=113, srv721526340=347, srv544176814=314, srv2006266367=221, srv712512114=344, srv1035420511=9, srv1004168540=2, srv969002566=386, srv656835003=337, srv891372466=373, srv1800593209=180, srv168176417=154, srv1088808210=21, srv1090713746=22, srv148133369=110, srv485130962=302, srv178018097=175, srv404842034=288, srv1168781152=41, srv254112016=256, srv592439632=325, srv1945113146=208, srv249588578=255, srv1370928274=82, srv2100288213=237, srv612805270=329, srv1828920442=186, srv926617452=381, srv1234330313=54, srv1323930404=74, srv1795932576=177, srv357244170=281, srv766594245=355, srv1669581710=151, srv2102045782=238, srv1825874398=185, srv1679858514=153, srv1419693817=97, srv205473010=227, srv1705092146=162, srv1553282772=127, srv464985381=298, srv618563335=330, srv1689682328=158, srv724005348=348, srv1474857723=108, srv1316391673=71, srv1360032461=78, srv412675123=289, srv238039536=250, srv667437999=339, srv135999132=77, srv1047064813=13, srv911536515=376, srv719801423=346, srv111511446=30, srv1240008028=56, srv1818502263=183, srv1346181371=76, srv1447201446=104, srv886610567=372, srv974511523=388, srv1987641239=218, srv1483259034=111, srv1545856894=125, srv1549763821=126, srv1045517387=12, srv1605412986=136, srv153205482=121, srv312786263=270, srv59704736=327, srv1864027229=192, srv894822520=374, srv1727090035=164, srv451692823=296, srv2023378212=222, srv1612515530=138, srv604040010=328, srv996651081=391, srv1000896198=0, srv780475800=358, srv1094376369=26, srv327306092=275, srv1102942943=29, srv430310293=292, srv834109265=366, srv1842539737=188, srv1967077970=211, srv1696672608=160, srv677621781=342, srv1452920101=107, srv1976907929=213, srv264916246=261, srv278845737=263, srv426352909=291, srv1020940032=5, srv1004200397=3, srv2134167424=244, srv487204361=303, srv1922007586=203, srv2114276329=241, srv1128984151=32, srv808106618=362, srv946359088=384, srv1988372140=219, srv1975507346=212, srv337915876=277, srv1963459031=210, srv2120775134=243, srv374242418=284, 
srv932508365=383, srv404155989=287, srv1796367479=178, srv1378552750=84, srv717500802=345, srv496851947=305, srv1000923465=1, srv867153575=371, srv1849249486=190, srv853230535=369, srv2069846617=228, srv432340369=293, srv162964565=143, srv1882286473=195, srv1977510348=214, srv1368716646=81, srv1193730296=49, srv1611577086=137, srv1753183404=171, srv1908309846=201, srv1561914055=130, srv1844553904=189, srv117977388=44, srv1604580902=135, srv287597796=265, srv1926840113=204, srv254662709=257, srv1569178655=131, srv1187605101=47, srv140390609=90, srv1376165325=83, srv559719838=318, srv1410492746=94, srv727459403=349, srv1985741979=217, srv1653213865=149, srv140182533=89, srv1422465963=98, srv751516751=351, srv975545347=389, srv1685788743=156, srv1061714112=17, srv960648076=385, srv8452800=368, srv1904486860=200, srv1180194580=45, srv1043985102=11, srv927758927=382, srv1425876527=100, srv264036678=259, srv1505317815=117, srv643848945=333, srv1308504822=70, srv1642497893=147, srv982483648=390, srv1197839142=50, srv813377427=363, srv1379244237=85, srv1728372340=165, srv1773911896=173, srv225024823=246, srv112932655=33, srv385003211=286, srv1685282025=155, srv1407548329=93, srv64549274=334, srv1992860254=220, srv2086674311=236, srv301992257=268, srv1977623011=215, srv1184994466=46, srv2086287605=235, srv1484830540=112, srv2026324890=223, srv229427783=248, srv1701061217=161, srv2048180010=224, srv918368052=378, srv17522575=170, srv1316648453=72, srv320633949=273, srv1246717427=59, srv2107603690=240, srv1750864138=169, srv652298129=336, srv1932308705=206, srv1392210650=87, srv161588399=140, srv1688741280=157, srv1414541342=96, srv1733866973=167, srv525534885=310, srv1631091480=144, srv1545226813=124, srv1633555623=145, srv49274574=304, srv299120679=267, srv279766045=264, srv840284587=367, srv820700482=364, srv1028124074=8, srv1602846163=134, srv1891661787=197, srv1092597093=24, srv1717044524=163, srv1167603548=40, srv1543743521=123, srv2085722954=233, srv1576143962=132, srv2105870923=239, srv767234634=356, srv1240754609=57, srv1558627977=129, srv181202961=181, srv245108090=251, srv1148409840=37, srv443895264=294, srv1279639832=66, srv456835638=297, srv1785340344=176, srv109076006=23, srv227626356=247, srv1035758442=10, srv230361106=249, srv1679328466=152, srv1817046459=182, srv35583401=280, srv92279271=379, srv245643279=252, srv1293593164=67, srv469937627=299, srv923595879=380, srv637609595=331, srv2051378867=226, srv125349928=61, srv1258278635=62, srv264414657=260, srv519441622=309, srv1913394054=202, srv1193552787=48, srv1406656868=91, srv451049723=295, srv1308419578=69, srv583690535=323, srv1934657268=207, srv1362786707=79, srv795258026=360, srv144739355=105, srv352617407=279, srv527832008=311, srv1659827900=150, srv1488835312=114, srv1265929458=64, srv1615064111=139, srv997030275=392, srv1135596899=34, srv1953199687=209, srv1269229106=65, srv325047314=274, srv544809883=315, srv1779996737=174, srv1490407668=115, srv174264582=168, srv1493448138=116, srv660654549=338, srv359093012=282, srv1098195641=28, srv1023956175=7, srv569543331=321, srv1069071232=18, srv208106283=231, srv1873785179=194, srv1450542330=106, srv1558123780=128, srv185262002=191, srv1691421229=159, srv1059821110=16, srv1520473911=118, srv670946830=341, srv1621392714=141, srv2077252051=229, srv669128973=340, srv513487834=307, srv559855061=319, srv769511269=357, srv2118270428=242, srv517532416=308, srv1295875343=68, srv556024865=317, srv53092877=312, srv798030990=361, srv90601870=375, srv2080132947=230, srv12450108=58, 
srv2134952154=245, srv823457982=365, srv2081942355=232, srv1221688904=51, srv245906406=253, srv1526739826=120, srv329070877=276, srv1885793085=196, srv642026145=332} racks are {rack=0} 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 
2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-10T12:16:25,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-10T12:16:25,197 DEBUG [Time-limited test 
{}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 
is on host 92 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-10T12:16:25,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is 
on host 123 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 154 is on host 154 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-10T12:16:25,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-10T12:16:25,199 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 
2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is 
on host 277 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-10T12:16:25,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 308 is on host 308 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-10T12:16:25,200 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-10T12:16:25,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-10T12:16:25,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-10T12:16:25,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-10T12:16:25,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-10T12:16:25,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-10T12:16:25,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-10T12:16:25,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-10T12:16:25,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-10T12:16:25,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-10T12:16:25,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-10T12:16:25,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-10T12:16:25,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-10T12:16:25,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 
2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-10T12:16:25,202 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 
2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-10T12:16:25,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 
2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 
2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-10T12:16:25,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 
2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-10T12:16:25,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-10T12:16:25,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=56, number of hosts=393, number of racks=1
2024-11-10T12:16:25,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9999999999999984); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0);
2024-11-10T12:16:25,208 WARN [Time-limited test {}] balancer.StochasticLoadBalancer(548): calculatedMaxSteps:17606400 for loadbalancer's stochastic walk is larger than maxSteps:1000000. Hence load balancing may not work well. Setting parameter "hbase.master.balancer.stochastic.runMaxSteps" to true can overcome this issue. (This config change does not require service restart)
2024-11-10T12:16:25,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.866551126516463, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9999999999999984); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=1000000
2024-11-10T12:16:30,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 5154 ms to try 1000000 different iterations. Found a solution that moves 55 regions; Going from a computed imbalance of 0.866551126516463 to a new imbalance of 0.011915077989601387. functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.9821428571428571); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0);
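Editor's note: the "initial weighted average imbalance=0.866551126516463" above is consistent with reading the value as the multiplier-weighted mean of the per-cost-function imbalances printed in the same message; the multipliers of the functions not marked "(not needed)" sum to 577.0, which matches the "sum of multiplier of cost functions" reported a few lines below. The following is only an illustrative, self-contained sketch that reproduces the logged numbers; it is not HBase source code, and the class and variable names are invented for this note.

  // Illustrative check (not HBase code): multiplier-weighted mean of the
  // per-cost-function imbalances logged by StochasticLoadBalancer above.
  public class WeightedImbalanceCheck {
      public static void main(String[] args) {
          // Multipliers of the cost functions not marked "(not needed)" in the log line above.
          double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
          // Corresponding imbalances before balancing (only RegionCountSkew is non-zero).
          double[] imbalances  = {0.9999999999999984, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
          double weighted = 0.0;
          double totalMultiplier = 0.0;
          for (int i = 0; i < multipliers.length; i++) {
              weighted += multipliers[i] * imbalances[i];
              totalMultiplier += multipliers[i];
          }
          // totalMultiplier = 577.0; weighted / totalMultiplier ≈ 0.866551126516463,
          // matching the logged initial weighted average imbalance.
          System.out.println(weighted / totalMultiplier);
      }
  }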
2024-11-10T12:16:30,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=0.2
2024-11-10T12:16:30,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 577.0 etc.
2024-11-10T12:16:30,380 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: master.balancer.TestBalancerDecision#testBalancerDecisions Thread=13 (was 12)
Potentially hanging thread: Time-limited test.named-queue-events-pool-0
 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
 java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
 java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
 java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
 java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
 java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
 app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
 app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
 app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
 app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
 java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=286 (was 286), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=203 (was 196) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5416 (was 6266)
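Editor's note: the post-balance number in the "Finished computing new moving plan" message reads the same way under the weighted-average interpretation above: with every logged imbalance now 0.0 except MoveCostFunction, (7.0 * 0.9821428571428571) / 577.0 ≈ 0.011915077989601, matching the logged "new imbalance". The earlier WARN names its own remedy, hbase.master.balancer.stochastic.runMaxSteps; the sketch below (again illustrative only, class name invented, property name taken verbatim from the WARN) shows one way a test or embedded setup could apply it programmatically, while a running cluster would normally set the same property in hbase-site.xml instead.

  // Illustrative sketch (not HBase source): verifies the post-balance value above and
  // applies the setting recommended by the calculatedMaxSteps WARN.
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class PostBalanceCheck {
      public static void main(String[] args) {
          // Only MoveCostFunction is non-zero after balancing: multiplier 7.0,
          // imbalance 0.9821428571428571 as logged; 577.0 is the multiplier sum.
          double newImbalance = (7.0 * 0.9821428571428571) / 577.0;
          System.out.println(newImbalance);  // ≈ 0.011915077989601, the logged "new imbalance"

          // Property name copied from the WARN; per the log, changing it does not
          // require a service restart.
          Configuration conf = HBaseConfiguration.create();
          conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", true);
      }
  }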